Example #1
def calc_we(cdata,basedir,log_target_rate):
    # WE parameters
    we_dir = os.path.join(basedir,cdata['name'],'analysis')
    we_dt = cdata['tau']
    we_nbins = cdata['nbins']
    we_target_count = cdata['target_count']

    winsize_flux = cdata['analysis']['winsize_flux']
    winsize_err = cdata['analysis']['winsize_err']
    last_n = cdata['analysis']['last_n']

    we_nframes = 0

    we_data_files = glob(os.path.join(we_dir,'*/rate.h5'))

    for fname in we_data_files:
        f = h5py.File(fname,'r')
        we_nframes = max(we_nframes,f.attrs['last_completed_iter']-2)
        f.close() 

    we_err = np.empty((len(we_data_files),2,we_nframes))
    we_err.fill(np.nan)

    for k,fname in enumerate(we_data_files):
        print 'we: {}'.format(fname)
        f = h5py.File(fname,'r')
        s = f['data'][:]
        dget = min(we_nframes,s.shape[0])
        s = s[:dget,:]

        ss = np.empty_like(s)
        for i in xrange(4):
            ss[:,i] = smooth(s[:,i],winsize_flux,'flat')

        sm = s[-last_n:,:].sum(0)

        rAB = (1.0*ss[:,1]) / (we_dt*ss[:,2])
        rBA = (1.0*ss[:,0]) / (we_dt*ss[:,3])

        rABm = (1.0*sm[1]) / (we_dt*sm[2])
        rBAm = (1.0*sm[0]) / (we_dt*sm[3])

        print 'we_{} -- kAB: {}, kBA: {}'.format(k,rABm,rBAm)

        we_err[k,0,:rAB.shape[0]] = logfunc(rAB) - log_target_rate[0]
        we_err[k,1,:rBA.shape[0]] = logfunc(rBA) - log_target_rate[1]

        f.close()

    we_err = np.abs(we_err)

    #we_err_avg = np.sqrt(np.mean(we_err**2,0))
    we_err_avg = np.sqrt(bn.nanmean(we_err**2,0))

    for i in xrange(2):
        we_err_avg[i,:] = smooth(we_err_avg[i,:],winsize_err,'flat')

    we_t = we_dt * we_nbins * we_target_count * np.arange(we_nframes)

    return we_err_avg, we_t
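Most of the examples in this collection call a SciPy-cookbook-style smooth() helper that takes a signal, a window length and a window name ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'). The exact helper each project ships is not included in the snippets; the following is only a minimal sketch of that convention, assuming reflection padding at the ends as in the cookbook recipe:

import numpy as np

def smooth(x, window_len=11, window='flat'):
    """Minimal sketch of a cookbook-style moving-window smoother.

    'flat' is a plain moving average; any other name uses the matching
    numpy window function. Because the ends are padded by reflection,
    the returned array is window_len - 1 samples longer than the input,
    which is why several examples below slice off window_len/2 samples
    from each end of the result.
    """
    x = np.asarray(x, dtype=float)
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window == 'flat':   # moving average
        w = np.ones(window_len)
    else:                  # 'hanning', 'hamming', 'bartlett', 'blackman'
        w = getattr(np, window)(window_len)
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    return np.convolve(w / w.sum(), s, mode='valid')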
Example #2
    def smooth(self,smooth,downsample=True,**kwargs):
        """
        Smooth the spectrum by factor `smooth`.  


        Documentation from the :mod:`smooth` module:

        Parameters
        ----------
        downsample: bool
            Downsample the spectrum by the smoothing factor?
        """
        smooth = round(smooth)
        self.data = sm.smooth(self.data,smooth,downsample=downsample,**kwargs)

        if downsample:
            self.xarr = self.xarr[::smooth]
            if len(self.xarr) != len(self.data):
                raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
            if self.error is not None:
                self.error = sm.smooth(self.error,smooth,**kwargs)
            self.baseline.downsample(smooth)
            self.specfit.downsample(smooth)
    
            self._smooth_header(smooth)
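The downsample branch above only works if smoothing preserves the array length before both the data and the x axis are decimated by the same factor, which is what the ValueError about convmode guards against. A toy length-bookkeeping check, using a plain numpy 'same'-mode convolution in place of sm.smooth:

import numpy as np

factor = 4                                   # hypothetical smoothing factor
xarr = np.linspace(0, 1, 103)
data = np.sin(20 * xarr)

kernel = np.ones(factor) / factor
smoothed = np.convolve(data, kernel, mode='same')[::factor]   # smooth, then downsample
xarr_ds = xarr[::factor]

assert len(xarr_ds) == len(smoothed)   # lengths only stay aligned for length-preserving smoothing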
Example #3
def kinematic_params(mech_x_l, time_list, smooth_window):
    mech_time_mat = np.matrix(time_list)
    tstep_size = .03333333333
    num_el = mech_time_mat.shape[1]
    uniform_time2 = np.cumsum(np.round((mech_time_mat[0,1:] - mech_time_mat[0,0:-1]) / tstep_size) * tstep_size)
    uniform_time2 = np.column_stack((np.matrix([0]), uniform_time2))

    mech_x_mat = np.matrix(mech_x_l)
    if uniform_time2.shape[1] != mech_x_mat.shape[1]:
        pdb.set_trace()

    mech_x_mat = np.matrix(smooth.smooth(mech_x_mat.A1, smooth_window,
                                         'blackman'))
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    vel = gradient(uniform_time2, mech_x_mat)
    uniform_time2 = uniform_time2[:,1:-1]

    vel = np.matrix(smooth.smooth(vel.A1, smooth_window, 'blackman'))
    mech_x_mat = mech_x_mat[:,smooth_window-1:-smooth_window+1]
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    acc = gradient(uniform_time2, vel)
    uniform_time2 = uniform_time2[:,1:-1]
    vel = vel[:,1:-1]
    mech_x_mat = mech_x_mat[:,2:-2]

    acc = np.matrix(smooth.smooth(acc.A1, smooth_window, 'blackman'))
    vel = vel[:,smooth_window-1:-smooth_window+1]
    mech_x_mat = mech_x_mat[:,smooth_window-1:-smooth_window+1]
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    return mech_x_mat.A1.tolist(), vel.A1.tolist(), acc.A1.tolist(), uniform_time2.A1.tolist()
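kinematic_params relies on a gradient(time, x) helper that is not part of this snippet. A hypothetical central-difference version is sketched below; it returns the derivative at the N-2 interior points of a 1xN matrix, which would explain why the caller trims the time and position arrays with [:, 1:-1] after each call:

import numpy as np

def gradient(t_mat, x_mat):
    # Hypothetical helper: central differences over the interior points
    # of 1xN np.matrix rows, returned as a 1x(N-2) np.matrix.
    t = np.asarray(t_mat).ravel()
    x = np.asarray(x_mat).ravel()
    dxdt = (x[2:] - x[:-2]) / (t[2:] - t[:-2])
    return np.matrix(dxdt)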
def get_matrix_for_textfile(data, img_size_crop_x, img_size_crop_y, stim, zz, time_start, time_end,\
    f_f_flag, dff_start, dff_end, stim_start, stim_end, smooth_window, filename, pp):
    
    #Cropping unwanted pixels as specified by user
    if img_size_crop_x!= 0 and img_size_crop_y!=0:
        print "Cropping x and y pixels.."
        data1 = data[img_size_crop_y:-img_size_crop_y, img_size_crop_x:-img_size_crop_x]
    elif img_size_crop_x==0 and img_size_crop_y!=0:
        print "Cropping only y pixels.."
        data1 = data[img_size_crop_y:-img_size_crop_y, :]
    elif img_size_crop_x!=0 and img_size_crop_y==0:
        print "Cropping only x pixels.."
        data1 = data[:, img_size_crop_x:-img_size_crop_x]
    else:
        data1 = data
   
    print 'Creating array from stack for Stim ' + stim + ' Z='+ str(filename)
    temp_matfile_for_thunder = np.zeros([np.size(data1, axis=0)*np.size(data1, axis=1),3+(time_end-time_start+1)+smooth_window-2], dtype=np.int)

    count = 0    
    for yy in xrange(0,np.size(data1, axis=1)):
        for xx in xrange(0,np.size(data1, axis=0)): 
            temp_matfile_for_thunder[count,0] = xx+1;
            temp_matfile_for_thunder[count,1] = yy+1;
            temp_matfile_for_thunder[count,2] = zz;
            # Create delta f/f values if necessary
            if f_f_flag==0:
                temp_matfile_for_thunder[count,3:] = smooth(data1[xx,yy,time_start:time_end],smooth_window,'hanning')
            else:
                temp_matfile_for_thunder[count,3:] = smooth(((data1[xx,yy,time_start:time_end]-np.mean(data1[xx,yy,dff_start:dff_end]))/np.std(data1[xx,yy,dff_start:dff_end])),smooth_window,'hanning')
            count = count+1 
    
    #Plot heatmap for validation    
    with sns.axes_style("white"):
        A = temp_matfile_for_thunder[:,3:]    
        B = np.argsort(np.mean(A, axis=1))  
        C = A[B,:]
        if f_f_flag == 1: #Plot with correct clim if dff is true
            fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet',vmin=-5, vmax=5)
        else:
            fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet')
        plot_vertical_lines(stim_start-time_start,stim_end-time_start)
        labels, locs = plt.xticks()
        labels1 = [int(item) for item in labels]
        labels2 = [str(int(item)+time_start) for item in labels]
        plt.xticks((labels1),(labels2))
        plt.xlim(0,(time_end-time_start))
        plt.title('Sorted Heatmap Z='+str(filename))
        plt.colorbar()
        fig2 = plt.gcf()
        pp.savefig(fig2)
        plt.close()
        A = None    

    return temp_matfile_for_thunder
def get_matrix_for_textfile(data_mat, name_for_saving_files, num_z_planes, time_start,time_end, f_f_flag, dff_start, dff_end, stimulus_on_time, stimulus_off_time, smooth_window, pp):
    
    #Save as numpy array
    print 'Creating array from stack for ' + name_for_saving_files
    if smooth_window!=0:                
        temp_numpy_array_for_thunder = np.zeros([np.size(data_mat, axis=0)*np.size(data_mat, axis=1)*np.size(data_mat,axis=2),3+(time_end-time_start+1)+smooth_window-2], dtype=np.int)
    else:
        temp_numpy_array_for_thunder = np.zeros([np.size(data_mat, axis=0)*np.size(data_mat, axis=1)*np.size(data_mat,axis=2),3+(time_end-time_start)], dtype=np.int)

    print np.shape(temp_numpy_array_for_thunder)    
    count = 0  
    count1 = 0 
    for zz in xrange(0,np.size(num_z_planes,axis=0)):
        for yy in xrange(0,np.size(data_mat, axis=1)):
            for xx in xrange(0,np.size(data_mat, axis=0)): 
                temp_numpy_array_for_thunder[count,0] = xx+1;
                temp_numpy_array_for_thunder[count,1] = yy+1;
                temp_numpy_array_for_thunder[count,2] = num_z_planes[zz];
                # Create delta f/f values if necessary
                if smooth_window!=0:                
                    if f_f_flag==0:
                        temp_numpy_array_for_thunder[count,3:] = smooth(data_mat[xx,yy,zz,time_start:time_end],smooth_window,'hanning')
                    else:
                        temp_numpy_array_for_thunder[count,3:] = smooth(((data_mat[xx,yy,zz,time_start:time_end]-np.mean(data_mat[xx,yy,zz,dff_start:dff_end]))/np.std(data_mat[xx,yy,zz,dff_start:dff_end])),smooth_window,'hanning')
                else:
                    if f_f_flag==0:
                        temp_numpy_array_for_thunder[count,3:] = data_mat[xx,yy,zz,time_start:time_end]
                    else:
                        temp_numpy_array_for_thunder[count,3:] = ((data_mat[xx,yy,zz,time_start:time_end]-np.mean(data_mat[xx,yy,zz,dff_start:dff_end]))/np.std(data_mat[xx,yy,zz,dff_start:dff_end]))
                count = count+1 
                
        #Plot heatmap for validation    
        with sns.axes_style("white"):
            A = temp_numpy_array_for_thunder[count1:count-1,3:]
            count1 = count-1
            B = np.argsort(np.mean(A, axis=1))  
            C = A[B,:]
            if f_f_flag == 1: #Plot with correct clim if dff is true
                fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet',vmin=-5, vmax=5)
            else:
                fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet')
            
            plot_vertical_lines_onset(stimulus_on_time)
            plot_vertical_lines_offset(stimulus_off_time)
            plt.title(name_for_saving_files +' Z='+ str(zz+1))
            plt.colorbar()
            fig2 = plt.gcf()
            pp.savefig(fig2)
            plt.close()


    return temp_numpy_array_for_thunder
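In both variants the validation heatmap sorts the pixel-by-time matrix by each row's mean response, so the most active pixels end up at the top, and only the strongest 1000 rows are drawn. The sorting step in isolation, on toy data:

import numpy as np

A = np.random.rand(5000, 200)           # pixels x timepoints
order = np.argsort(A.mean(axis=1))      # weakest responders first, strongest last
C = A[order, :]                         # rows sorted by mean activity
strongest = C[-1000:, :]                # the 1000 most active pixels, as imaged above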
Example #6
    def smooth(self, smooth, **kwargs):
        """
        Smooth the spectrum by factor "smooth".  Options are defined in sm.smooth

        because 'Spectra' does not have a header attribute, don't do anything to it...
        """
        smooth = round(smooth)
        self.data = sm.smooth(self.data, smooth, **kwargs)
        self.xarr = self.xarr[::smooth]
        if len(self.xarr) != len(self.data):
            raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
        self.error = sm.smooth(self.error, smooth, **kwargs)
        self.baseline.downsample(smooth)
        self.specfit.downsample(smooth)
def pop_spectra(self,row,col):
    self.laserFlashFile.switchOffHotPixTimeMask()
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    try:
        last_ind = np.where(n_inbin>5)[0][-1]
    except IndexError:
        last_ind=len(n_inbin)-1
    expTime = self.laserFlashFile.hotPixTimeMask.expTime
    self.axes.plot(phase_bins,n_inbin*1.0/expTime, 'k.',alpha=0.5,label="raw")
    self.axes.set_xlim(phase_bins[(np.where(n_inbin >= 3))[0][0]],phase_bins[last_ind])

    self.laserFlashFile.switchOnHotPixTimeMask(reasons=['laser not on','hot pixel'])
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins_1=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    intTime_1 = self.laserFlashFile.hotPixTimeMask.getEffIntTime(row,col)
    smoothed_1 = np.asarray(smooth.smooth(n_inbin, 30, 'hanning'))
    self.axes.plot(phase_bins_1,n_inbin*1.0/intTime_1, 'b.',label="laser on, hotpix masked")
    
    self.laserFlashFile.switchOnHotPixTimeMask(reasons=['laser not off','hot pixel'])
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins_2=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    intTime_2 = self.laserFlashFile.hotPixTimeMask.getEffIntTime(row,col)
    smoothed_2 = np.asarray(smooth.smooth(n_inbin, 30, 'hanning'))
    self.axes.plot(phase_bins_2,n_inbin*1.0/intTime_2, 'g.',label="laser off, hotpix masked")
    
    self.axes.legend(loc=2)
    
    self.axes.plot(phase_bins_1, smoothed_1*1.0/intTime_1,'b-')
    self.axes.plot(phase_bins_2,smoothed_2*1.0/intTime_2,'g-')
    
    self.axes.set_xlabel('phase [ADC/DAC units]')
    self.axes.set_ylabel('Exposure time adjusted count rate [photons/s]')
Example #8
def smooth_traces(browser):
    """ Smooth traces
    
    Options:
    1) Window type
    2) Window length
    """
    
    # Get options
    window = str(browser.ui.toolStackedWidget.smoothComboBox.currentText())
    window_len = float(browser.ui.toolStackedWidget.smoothLength.text())
    
    # Get data and widgets
    plotWidget = browser.ui.dataPlotsWidget
    toolsWidget = browser.ui.toolStackedWidget
    
    # Smooth data
    results = [] 
    for item in plotWidget.plotDataItems:  
        # Copy attributes and add some new ones
        attrs = item.attrs
        attrs['smooth_window_type'] = window
        attrs['smooth_window_length'] = window_len
        
        # Smooth
        traceSmooth = smooth.smooth(item.data, window_len=window_len, window=window)
        results.append([item.text(0), traceSmooth, attrs])
        
        # Plot smoothed trace
        x = np.arange(0, len(traceSmooth)*item.attrs['dt'], item.attrs['dt'])
        plotWidget.plot(x, traceSmooth, pen=pg.mkPen('#F2EF44', width=1))

    # Store results
    parentText = plotWidget.plotDataItems[0].parent().text(0) # Assumes all plotted data have the same parent
    aux.save_results(browser, parentText+'_smooth', results)           
Example #9
 def load(self,name):
     """
     Returns a two dimensional numpy array where a[:,0] is
     wavelength in Angstroms and a[:,1] is flux in 
     counts/sec/angstrom/cm^2
     
     Noisy spectra are smoothed with the window_len specified in the .txt file.
     Ergs and AB Mag units are automatically converted to counts.
     """
     fname = self.objects[name]['dataFile']
     fullFileName = os.path.join(self.this_dir,"data",fname[0])
     if (string.count(fullFileName,"fit")):
         a = self.loadSdssSpecFits(fullFileName)
     else:
         a = numpy.loadtxt(fullFileName)
         
     len = int(self.objects[name]['window_len'][0])
     if len > 1:
         a[:,1] = smooth.smooth(a[:,1], window_len=len)[len/2:-(len/2)]
     try:
         fluxUnit = self.objects[name]['fluxUnit'][0]
         scale = float(fluxUnit.split()[0])
         a[:,1] *= scale
     except ValueError:
         print "error"
     ergs = string.count(self.objects[name]['fluxUnit'][0],"ergs")
     if ergs:
         a[:,1] *= (a[:,0] * self.k)
     mag = string.count(self.objects[name]['fluxUnit'][0],"mag")
     if mag:
         a[:,1] = (10**(-2.406/2.5))*(10**(-0.4*a[:,1]))/(a[:,0]**2) * (a[:,0] * self.k)
     return a
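The ergs branch above multiplies the flux by wavelength times a constant self.k. Assuming self.k is 1/(h*c) in erg-Angstrom units (this is not confirmed by the snippet), that is the usual conversion from energy flux to photon rate, since each photon carries E = h*c/lambda:

import numpy as np

h = 6.626e-27        # Planck constant, erg s
c = 2.998e18         # speed of light, Angstrom / s
k = 1.0 / (h * c)    # assumed value of self.k: photons per erg per Angstrom

wavelength = np.array([4000.0, 5000.0, 6000.0])    # Angstrom
flux_ergs = np.array([1.0e-16, 2.0e-16, 1.5e-16])  # erg / s / cm^2 / Angstrom
flux_counts = flux_ergs * wavelength * k           # photons / s / cm^2 / Angstrom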
Example #10
    def speedBias(self, bias_type='normal', debug=False):
        '''
        Calculates the unsigned speed bias quickly without having to
        calculate everything else.
        '''
        if debug: print 'Calculating bias on unsigned speed...'

        # grab important variables
        mod_u = self.Variables.struct['mod_timeseries']['ua']
        mod_v = self.Variables.struct['mod_timeseries']['va']
        mod_spd = np.sqrt(mod_u**2 + mod_v**2)
        obs_u = self.Variables.struct['obs_timeseries']['ua']
        obs_v = self.Variables.struct['obs_timeseries']['va']
        obs_spd = np.sqrt(obs_u**2 + obs_v**2)

        # change times to datetime times
        obs_time = self.Variables.struct['obs_time']
        mod_time = self.Variables.struct['mod_time']
        obs_dt, mod_dt = [], []
        for i in np.arange(obs_time.size):
            obs_dt.append(dn2dt(obs_time[i]))
        for i in np.arange(mod_time.size):
            mod_dt.append(dn2dt(mod_time[i]))

        # perform interpolation and grab bias
        (mod_sp_int, obs_sp_int, step_sp_int, start_sp_int) = \
            smooth(mod_spd, mod_dt, obs_spd, obs_dt,
                   debug=debug)
        stats = TidalStats(mod_sp_int, obs_sp_int, step_sp_int,
                           start_sp_int, type='speed', debug=debug)
        bias = stats.getBias(bias_type=bias_type)
        return bias
Example #11
    def powerRMSE(self, debug=False):
        '''
        Calculates the RMSE quickly without having to calculate everything
        else.
        '''
        # grab important variables
        mod_u = self.Variables.struct['mod_timeseries']['ua']
        mod_v = self.Variables.struct['mod_timeseries']['va']
        mod_spd = np.sqrt(mod_u**2 + mod_v**2)
        mod_pow = 0.5 * rho**3 * mod_spd**3

        obs_u = self.Variables.struct['obs_timeseries']['ua']
        obs_v = self.Variables.struct['obs_timeseries']['va']
        obs_spd = np.sqrt(obs_u**2 + obs_v**2)
        obs_pow = 0.5 * rho**3 * obs_spd**3

        # change times to datetime times
        obs_time = self.Variables.struct['obs_time']
        mod_time = self.Variables.struct['mod_time']
        obs_dt, mod_dt = [], []
        for i in np.arange(obs_time.size):
            obs_dt.append(dn2dt(obs_time[i]))
        for i in np.arange(mod_time.size):
            mod_dt.append(dn2dt(mod_time[i]))

        # perform interpolation and grab RMSE
        (mod_pw_int, obs_pw_int, step_pw_int, start_pw_int) = \
            smooth(mod_pow, mod_dt, obs_pow, obs_dt,
                   debug=debug)
        stats = TidalStats(mod_pw_int, obs_pw_int, step_pw_int,
                           start_pw_int, type='power', debug=debug)
        RMSE = stats.getRMSE()
        return RMSE
def edgedetect(image):
	global pixelsum, pixelcount
	oldimage=image
	if image[0]=="P3":
		#converts file to grayscale and smoothes it
		import smooth
		image=smooth.smooth(image)
	file2=oldimage[:]
	xyarray=image[2].split(" ")
	width=int(xyarray[0])
	height=int(xyarray[1])
	count=0
	#loop through all non-edge pixels
	for x in range(1,width-1):
		for y in range(1,height-1):
			#calculate the horizontal gradient of the current pixel
			hg=int(-1*getpixel(x-1,y-1,image)+0*getpixel(x,y-1,image)+1*getpixel(x+1,y-1,image)-2*getpixel(x-1,y,image)+0*getpixel(x,y,image)+2*getpixel(x+1,y,image)-1*getpixel(x-1,y+1,image)+0*getpixel(x,y+1,image)+1*getpixel(x+1,y+1,image))
			#calculate the vertical gradient of the current pixel
			vg=int(1*getpixel(x-1,y-1,image)+2*getpixel(x,y-1,image)+1*getpixel(x+1,y-1,image)+0*getpixel(x-1,y,image)+0*getpixel(x,y,image)+0*getpixel(x+1,y,image)-1*getpixel(x-1,y+1,image)-2*getpixel(x,y+1,image)-1*getpixel(x+1,y+1,image))
			#if the threshold is reached, mark the pixel
			if(abs(hg)+abs(vg)>threshold):
				#count the number of pixels marked as edges
				pixelcount+=1
				if oldimage[0]=="P3": #image is color, mark as red
					file2[(width*y+x)*3+4]=255
					file2[(width*y+x)*3+5]=0
					file2[(width*y+x)*3+6]=0
				else: #image is grayscale, mark as white
					file2[width*y+x+4]=255
	pixelsum=width*height
	return file2
Example #13
def loadsimdata(filename):
    # load data and apply efficiency
    itof = hh.load(filename, 'I(tof)')
    eff = hh.load('mon1-eff.h5')
    i = itof.I * eff.I
    itof.I[:] = i

    # clean up
    itof[(0.019,None)].I = 0
    
    #
    x = itof.tof
    y = itof.I

    # convert to counts/10 mus
    #  counts * 10, bins / 10
    from smooth import smooth
    y = smooth(y, window_len=10, window='flat')
    # y = y[:len(x)]
    # y *= 10.
    indexes = range(5, len(x), 10)
    x = x[indexes]
    y = y[indexes] * 10

    # convert to arcs run #5
    # according to ARCS_runinfo.xml of run #5
    # beam power 110kW
    # total run time is 22590/30 seconds
    # the mc simulated was 2MW, 60Hz
    y *= 22590/30*110e3/(2e6/60)

    # extra scaling factor, why?
    y *= 0.83
    return x,y
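The "counts * 10, bins / 10" rebinning works because a 'flat' window of width 10 is a moving average: sampling every tenth point and multiplying by 10 is, away from the edges, the same as summing each block of ten bins. A small check of that equivalence, using a plain moving average in place of smooth():

import numpy as np

counts = np.arange(100, dtype=float)                        # toy per-bin counts
avg = np.convolve(counts, np.ones(10) / 10.0, mode='same')  # width-10 moving average
rebinned = avg[5::10] * 10                                  # every 10th sample, rescaled

block_sums = counts.reshape(-1, 10).sum(axis=1)             # direct 10-bin sums
assert np.allclose(rebinned[1:-1], block_sums[1:-1])        # agree away from the edges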
Example #14
	def findSegments(self, rawData, minSegSize=40, smoothData=False, window_len=5, window='blackman', peakFunc=argrelmax):
		principal_component_of_left_foot = self.PCABySensor(rawData)['LTIO'][:,0]

		if smoothData:
			principal_component_of_left_foot = smooth(principal_component_of_left_foot, window_len=window_len,window=window)

		segPoints = peakFunc(principal_component_of_left_foot,order=minSegSize)[0]
		return segPoints
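findSegments treats every local maximum of the first principal component as a segment boundary. Passing order=minSegSize to scipy.signal.argrelmax means a sample must exceed all neighbours within that many points on either side, which is what enforces the minimum segment size. A toy illustration:

import numpy as np
from scipy.signal import argrelmax

rng = np.random.default_rng(0)
t = np.linspace(0, 6 * np.pi, 600)
signal = np.sin(t) + 0.05 * rng.standard_normal(t.size)   # three broad peaks plus noise

# order=40: a point must beat its 40 neighbours on each side to count,
# so closely spaced noise bumps are not reported as segment points
seg_points = argrelmax(signal, order=40)[0]
print(seg_points)   # roughly one index per sine period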
def get_cutoff(data, w_len=50, m=30, guess=None):
    if guess is None:
        approx = get_threshold(data, m)
    else:
        approx = guess

    smoothData = smooth(data)
    return get_first_in_range(smoothData[approx - w_len:approx + w_len],
                              approx, w_len)
Example #16
def compareTG(data, plot=False, save_csv=False, debug=False, debug_plot=False):
    """
    Does a comprehensive comparison between tide gauge height data and
    modeled data.

    Input:
       - data = dictionary containing all necessary tide gauge and model data.
    Outputs
       - elev_suite = dictionary of useful statistics
    Options:
       - plot = boolean flag for plotting results
       - save_csv = boolean flag for saving statistical benchmarks in csv file
    """
    if debug: print "CompareTG..."
    # load data
    mod_elev = data['mod_timeseries']['elev']
    obs_elev = data['obs_timeseries']['elev']
    obs_datenums = data['obs_time']
    mod_datenums = data['mod_time']
    gear = data['type'] # Type of measurement gear (drifter, adcp,...)
    #TR: comment out
    #mod_harm = data['elev_mod_harmonics']

    # Save path & create folder
    name = data['name']
    save_path = name.split('/')[-1].split('.')[0]+'/'
    while exists(save_path):
        save_path = save_path[:-1] + '_bis/'
    mkdir(save_path)


    # convert times and grab values
    obs_time, mod_time = [], []
    for i, v in enumerate(obs_datenums):
        obs_time.append(dn2dt(v))
    for j, w in enumerate(mod_datenums):
        mod_time.append(dn2dt(w))

    if debug: print "...check if they line up in the time domain..."
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):
        raise PyseidonError("---time periods do not match up---")

    else:

        if debug: print "...interpolate timeseries onto a common timestep..."
        (mod_elev_int, obs_elev_int, step_int, start_int) = \
            smooth(mod_elev, mod_time, obs_elev, obs_time,
                   debug=debug, debug_plot=debug_plot)

    elev_suite = tidalSuite(gear, mod_elev_int, obs_elev_int, step_int, start_int,
                            [], [], [], [], [], [],
                            kind='elevation', plot=plot, save_csv=save_csv, save_path=save_path,
                            debug=debug, debug_plot=debug_plot)

    if debug: print "...CompareTG done."

    return elev_suite
def edgedetect(image):
    print image[:10]
    tvals = image[:]
    hg = image[:]
    vg = image[:]
    theta = image[:]
    global pixelsum, pixelcount
    oldimage = image
    if image[0] == "P3":
        # converts file to grayscale and smoothes it
        import smooth

        image = smooth.smooth(image)
    print image[:10]
    file2 = oldimage[:]
    width = int(image[1])
    height = int(image[2])
    count = 0
    # loop through all non-edge pixels
    for x in range(1, width - 1):
        for y in range(1, height - 1):
            # calculate the horizontal gradient of the current pixel
            hg[width * y + x + 4] = int(
                -1 * getpixel(x - 1, y - 1, image)
                + 0 * getpixel(x, y - 1, image)
                + 1 * getpixel(x + 1, y - 1, image)
                - 2 * getpixel(x - 1, y, image)
                + 0 * getpixel(x, y, image)
                + 2 * getpixel(x + 1, y, image)
                - 1 * getpixel(x - 1, y + 1, image)
                + 0 * getpixel(x, y + 1, image)
                + 1 * getpixel(x + 1, y + 1, image)
            )
            # calculate the vertical gradient of the current pixel
            vg[width * y + x + 4] = int(
                1 * getpixel(x - 1, y - 1, image)
                + 2 * getpixel(x, y - 1, image)
                + 1 * getpixel(x + 1, y - 1, image)
                + 0 * getpixel(x - 1, y, image)
                + 0 * getpixel(x, y, image)
                + 0 * getpixel(x + 1, y, image)
                - 1 * getpixel(x - 1, y + 1, image)
                - 2 * getpixel(x, y + 1, image)
                - 1 * getpixel(x + 1, y + 1, image)
            )
            # store the threshold value
            tvals[width * y + x + 4] = abs(hg[width * y + x + 4]) + abs(vg[width * y + x + 4])
            theta[width * y + x + 4] = float(atan2(-vg[width * y + x + 4], hg[width * y + x + 4]))
            # if the threshold is reached, mark the pixel
    for x in range(1, width - 1):
        for y in range(1, height - 1):
            if int(tvals[width * y + x + 4]) >= hthreshold:
                fill(x, y, file2, tvals, theta, lthreshold)

    pixelsum = width * height
    return file2
 def plot(self,smoothing=0,derivative=False,normalize=False):
     y=smooth(self.countsPerSec,smoothing)
     x=self.E
     if derivative:
         y=(y[1:]-y[:-1])/self.step
         x=x[0:-1]+self.step
         if normalize:
             y/=np.sqrt(sum(y**2))
     elif normalize:
         y*=len(y)/sum(y)
     pl.plot(x,y,label=self.fileName+':'+str(self.part))
Example #19
    def testSmoothZCosmos(self):
        std = MKIDStd.MKIDStd()
        raw = std.load("zcosmos841948")
        plt.plot(raw[:,0],raw[:,1], label="raw")

        len = 31
        smoothed = smooth.smooth(raw[:,1],window_len=len)
        plt.plot(raw[:,0],smoothed[len/2:-(len/2)], label="smoothed")

        plt.legend()
        plt.show()
def get_guess(spec, window_len=11, n=2, m=20, bins=2048):
    sdata = smooth(spec[::-1], window_len)

    # find values which are higher than neighbors
    test = r_[True, sdata[1:] > sdata[:-1]] & r_[sdata[:-1] > sdata[1:], True]

    # find values which are higher than the next m neighbors
    newInds = where(test)[0]
    values = [i - 5 for i in newInds[newInds > m] if sdata[i] ==
              max(sdata[i - m:i + m + 1]) and sdata[i] > 1.0]
    mu_values = [bins - i for i in values[n - 2:n]]
    return mean(mu_values)
Example #21
    def testSmoothDelta(self):
        nIn = 100;
    
        x = np.zeros(nIn)
        x[nIn/2] = 1

        len = 21
        xs = smooth.smooth(x, window_len=len)
        plt.clf()
        plt.plot(x)
        plt.plot(xs[len/2:-(len/2)])
        plt.savefig("testSmoothDelta.png")
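Smoothing a unit impulse returns the normalised window shape itself, and because the cookbook-style smooth() pads the signal, its output is window_len - 1 samples longer than the input, hence the xs[len/2:-(len/2)] trim above. A quick check of that bookkeeping, assuming the reflect-pad-then-'valid'-convolve behaviour:

import numpy as np

n_in = 100
window_len = 21

x = np.zeros(n_in)
x[n_in // 2] = 1.0

# emulate the cookbook smooth(): reflect-pad, then 'valid' convolution
w = np.hanning(window_len)
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
xs = np.convolve(w / w.sum(), s, mode='valid')

assert len(xs) == n_in + window_len - 1          # output is longer than the input
trimmed = xs[window_len // 2:-(window_len // 2)]
assert len(trimmed) == n_in                      # trimming recovers the original length
# the smoothed impulse is the normalised Hanning window centred at n_in//2
assert np.isclose(trimmed[n_in // 2], w.max() / w.sum())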
Example #22
def velocity(kin_info, smooth_window):
    mech_time_mat = np.matrix(kin_info['mech_time_arr'])
    tstep_size = .03333333333
    num_el = mech_time_mat.shape[1]
    uniform_time2 = np.cumsum(np.round((mech_time_mat[0,1:] - mech_time_mat[0,0:-1]) / tstep_size) * tstep_size)
    uniform_time2 = np.column_stack((np.matrix([0]), uniform_time2))

    mech_intrinsic_poses = kin_info['disp_mech_coord_arr']
    if uniform_time2.shape[1] != mech_intrinsic_poses.shape[0]:
        pdb.set_trace()

    vel = gradient(uniform_time2, np.matrix(mech_intrinsic_poses))
    return smooth.smooth(vel.A1, smooth_window, 'blackman')
Example #23
def loadexpdata(filename, n, binsize=10):
    # load data and apply efficiency
    itof = hh.load(filename)
    x = itof.tof
    y = itof.I

    # convert to counts/10 mus
    #  counts * 10, bins / 10
    from smooth import smooth
    y = smooth(y, window_len=binsize, window='flat')
    indexes = range(binsize/2, binsize/2+binsize*n, binsize)
    x = x[indexes]
    y = y[indexes] * binsize

    return x,y
Example #24
def extrema(a,bPlot=False,nBins=300,smoothWindowSize=50):
    hist,binEdges = np.histogram(a,bins=nBins,density=True)
    histSmooth = smooth.smooth(hist,smoothWindowSize,'hanning')
    binCenters = binEdges[0:-1]+np.diff(binEdges)/2.
    extremeIdxs = np.where(np.diff(np.sign(np.diff(histSmooth))))[0]+1
    bMaximums = np.diff(np.sign(np.diff(histSmooth)))<0
    bMaximums = bMaximums[np.where(np.diff(np.sign(np.diff(histSmooth))))]

    if bPlot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(binCenters,histSmooth)
        ax.plot(binCenters,hist)
        ax.plot(binCenters[extremeIdxs],histSmooth[extremeIdxs],'.')
    return {'extremeX':binCenters[extremeIdxs],'extremeY':hist[extremeIdxs],'bMaximums':bMaximums}
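The extrema test at the heart of this example is np.diff(np.sign(np.diff(y))): a negative value marks a local maximum and a positive value a local minimum, with a +1 index shift because each diff moves the result one sample to the left. The idiom on a tiny array:

import numpy as np

y = np.array([0., 1., 3., 2., 1., 0., 2., 5., 4.])

sign_change = np.diff(np.sign(np.diff(y)))
extreme_idxs = np.where(sign_change)[0] + 1      # +1: diff shifts indices left by one
is_maximum = sign_change[np.where(sign_change)] < 0

print(extreme_idxs)   # [2 5 7] -> y[2]=3 (max), y[5]=0 (min), y[7]=5 (max)
print(is_maximum)     # [ True False  True]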
Example #25
def summaryStatistics(data, std_window=10):
	gradients = []
	for col in range(np.shape(data)[1]):
		gradients.append(np.gradient(smooth(data[:,col])))
	gradients = np.column_stack(gradients)

	standardDeviations = []
	for col in range(np.shape(data)[1]):
		stdev = []
		for i in range(len(data)):
			stdev.append(np.std(data[:,col][max(0,i-(std_window/2)):i+(std_window/2)]))
		standardDeviations.append(stdev)
	standardDeviations = np.column_stack(standardDeviations)

	return (gradients,standardDeviations)
Example #26
def compareTG(data, plot=False, save_csv=False, debug=False, debug_plot=False):
    '''
    Does a comprehensive comparison between tide gauge height data and
    modeled data, much like the above function.

    Input is a dictionary containing all necessary tide gauge and model data.
    Outputs a dictionary of useful statistics.
    '''
    if debug: print "CompareTG..."
    # load data
    mod_elev = data['mod_timeseries']['elev']
    obs_elev = data['obs_timeseries']['elev']
    obs_datenums = data['obs_time']
    mod_datenums = data['mod_time']
    #TR: comment out
    #mod_harm = data['elev_mod_harmonics']

    # convert times and grab values
    obs_time, mod_time = [], []
    for i, v in enumerate(obs_datenums):
        obs_time.append(dn2dt(v))
    for j, w in enumerate(mod_datenums):
        mod_time.append(dn2dt(w))

    if debug: print "...check if they line up in the time domain..."
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):
        print "---time periods do not match up---"
        sys.exit()

    else:

        if debug: print "...interpolate timeseries onto a common timestep..."
        (mod_elev_int, obs_elev_int, step_int, start_int) = \
            smooth(mod_elev, mod_time, obs_elev, obs_time,
                   debug=debug, debug_plot=debug_plot)

    if debug: print "...get validation statistics..."
    stats = TidalStats(mod_elev_int, obs_elev_int, step_int, start_int, type='elevation',
                       debug=debug, debug_plot=debug_plot)


    elev_suite = tidalSuite(mod_elev_int, obs_elev_int, step_int, start_int,
                            type='elevation', plot=plot, save_csv=save_csv,
                            debug=debug, debug_plot=debug_plot)

    if debug: print "...CompareTG done."

    return elev_suite
Example #27
def compareTG(data):
    '''
    Does a comprehensive comparison between tide gauge height data and
    modeled data, much like the above function.

    Input is a dictionary containing all necessary tide gauge and model data.
    Outputs a dictionary of useful statistics.
    '''
    # load data
    mod_elev = data['mod_timeseries']['elev']
    obs_elev = data['obs_timeseries']['elev']
    obs_datenums = data['obs_time']
    mod_datenums = data['mod_time']
    mod_harm = data['elev_mod_harmonics']
    print data['name']

    # convert times and grab values
    obs_time, mod_time = [], []
    for i, v in enumerate(obs_datenums):
        obs_time.append(dn2dt(v))
    for j, w in enumerate(mod_datenums):
        mod_time.append(dn2dt(w))

    # check if they line up in the time domain
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):

        # use ut_reconstr to create a new timeseries
        mod_elev_int = ut_reconstr(obs_datenums, mod_harm)[0]
        obs_elev_int = obs_elev
        step_int = obs_time[1] - obs_time[0]
        start_int = obs_time[0]

    else:

        # interpolate timeseries onto a common timestep
        (obs_elev_int, mod_elev_int, step_int, start_int) = \
            smooth(mod_elev, mod_time, obs_elev, obs_time)

    # get validation statistics
    stats = TidalStats(mod_elev_int, obs_elev_int, step_int, start_int,
                       debug=True, type='height')
    elev_suite = stats.getStats()
    elev_suite['r_squared'] = stats.linReg()['r_2']
    elev_suite['phase'] = stats.getPhase(debug=False)

    return elev_suite
Example #28
def findSigmaThresh(data, initSigmaThresh=2., tailSlack=0., isPlot=False):
    '''
    Finds the optimal photon trigger threshold by cutting out the noise tail
    in the pulse height histogram.
    
    INPUTS:
    data - filtered phase timestream data (positive pulses)
    initSigmaThresh - sigma threshold to use when constructing initial
        pulse height histogram
    tailSlack - amount (in same units as data) to relax trigger threshold
    isPlot - make peak height histograms if true

    OUTPUTS:
    threshold - trigger threshold in same units as data
    sigmaThresh - trigger threshold in units sigma from median
    '''
    peakdict = sigmaTrigger(data, nSigmaTrig=initSigmaThresh)
    peaksHist, peaksHistBins = np.histogram(peakdict['peakHeights'], bins='auto')
    if(isPlot):    
        plt.plot(peaksHistBins[:-1], peaksHist)
        plt.title('Unsmoothed Plot')
        plt.show()
    print 'peaksHistLen:', len(peaksHist)
    peaksHist = smooth.smooth(peaksHist,(len(peaksHistBins)/20)*2+1)
    print 'peaksHistSmoothLen:', len(peaksHist)
    if(isPlot):
        plt.plot(peaksHistBins[0:len(peaksHist)], peaksHist)
        plt.title('smoothed plot')
        plt.show()
    
    minima=np.ones(len(peaksHist)) #keeps track of minima locations; element is 1 if minimum exists at that index
    minimaCount = 1
    #while there are multiple local minima, look for the deepest one
    while(np.count_nonzero(minima)>1):      
        minima = np.logical_and(minima, np.logical_and((peaksHist<=np.roll(peaksHist,minimaCount)),(peaksHist<=np.roll(peaksHist,-minimaCount))))
        #print 'minima array:', minima
        minima[minimaCount-1]=0
        minima[len(minima)-minimaCount]=0 #get rid of boundary effects
        minimaCount += 1

    thresholdInd = np.where(minima)[0][0]
    threshold = peaksHistBins[thresholdInd]-tailSlack
    sigmaThresh = (threshold-np.median(data))/np.std(data)
    return threshold, sigmaThresh
Example #29
def plotTrackingData(trackingData, props):
    colors = ['Crimson', 'CornflowerBlue', 'DarkOliveGreen', 'DarkOrange', 'LightSlateGray']
    nData = len(trackingData) 
    # Prepare plots
    fig = plt.figure(figsize=(20, 4*nData))
    gs = gridspec.GridSpec(nData,3, width_ratios=[1,6,1])
    ax, dataOut = [], []
    for d in np.arange(0, len(trackingData)):
        aviProps = props[d]
        trackData = trackingData[d]
        dataOut.append(trackData[:,1])
        ax1, ax2, ax3 = d*3+0, d*3+1, d*3+2
        ax.append(fig.add_subplot(gs[ax1]))
        ax.append(fig.add_subplot(gs[ax2]))
        ax.append(fig.add_subplot(gs[ax3]))

        # Plot tracking
        ax[ax1].plot(trackData[:,0],trackData[:,1],colors[d])

        # Plot Y over time
        timePerFrame = 1./aviProps[4]
        timePerFrame = timePerFrame/60. # convert from seconds to minutes
        tAxis = np.arange(0, len(trackData)*timePerFrame, timePerFrame)
        if len(tAxis)>len(trackData): tAxis = np.delete(tAxis, -1)
        if len(tAxis)<len(trackData): trackData = np.delete(trackData, -1)
        smooth_trackData = smooth.smooth(trackData[:,1], window_len=50)
        ax[ax2].plot(tAxis, trackData[:,1],'k',lw=1)
        ax[ax2].plot(tAxis, smooth_trackData, colors[d], lw=1.5)

        # Plot histogram
        ax[ax3].hist(trackData[:,1], bins=30, orientation='horizontal', histtype='stepfilled', normed=True, color=colors[d])
        ax[ax3].set_xlim([0,0.008])

        # get and plot looms
        #loomOnsets = da.getLoomOnsets(trackData, aviProps)
        #for l in loomOnsets: ax[ax2].plot(l, 1150, marker='o', markerfacecolor='k', markeredgecolor='k')
    plt.show()
    return dataOut
Example #30
def segmentationPoints(X, xIsFilename=False, windowSize=100, smoothing='blackman'):
	if xIsFilename == True:
		with open(X,'r') as fin:
			X = np.loadtxt(fin,delimiter=",")
	

	smoothX = sm.smooth(X,window_len=windowSize,window=smoothing)
	#(unsmoothedMins,unsmoothedMaxs) = mm.find_mins_and_maxs1D(X)
	(smoothedMins,smoothedMaxs) = mm.find_mins_and_maxs1D(smoothX)
	# Using Dynamic Time Warping to map the smoothed curve onto the original, noisy curve.
	alignmentOfSmoothedCurveAndOriginal = r.dtw(smoothX, X)
	warpIndexes=r.warp(alignmentOfSmoothedCurveAndOriginal,True)
	#pl.plot(warpIndexes,smoothX)
	#pl.plot(X)
	#pl.show()

	minsMappedToOriginal = sorted(list(set([warpIndexes[smoothedMins[i]] for i in range(len(smoothedMins))])))
	maxsMappedToOriginal = sorted(list(set([warpIndexes[smoothedMaxs[i]] for i in range(len(smoothedMaxs))])))

	#minsMappedToOriginal = mapToOriginalRecursive(X,smoothedMins,unsmoothedMins,"mins")
	#maxsMappedToOriginal = mapToOriginalRecursive(X,smoothedMaxs,unsmoothedMaxs,"maxs")

	return (minsMappedToOriginal,maxsMappedToOriginal)
zffs[zffs > 0] = 1    # To find zero crossings, place value 1 at all locations zffs>0
zffs[zffs < 0] = -1   # Place value -1 at all locations zffs<0
gci = np.where(np.diff(zffs) == 2)[0]     # Take difference and look for the positions of value 2
es = np.abs(zf1[gci + 1] - zf1[gci - 1])  # Positions of value 2 are the instants of zero crossings
T0 = np.diff(gci)     # Finding period in terms of sample number
T0 = T0 / float(fs)   # Period in seconds
f0 = 1.0 / T0         # Frequency in Hz
f0 = np.append(f0, [f0[-1], f0[-1]])  # Filling holes created by the two difference operations

# Smoothing the melody to remove high frequency contents
f0Smooth = smooth.smooth(f0, 10, 'flat')

# ------------------------------------------------------------------------------- #
# Remove unvoiced portions from F0 contour
tempF0 = np.zeros(lenSig)
tempF0[gci] = f0Smooth

# Get F0 at only voiced regions
voicPitch = np.zeros(np.shape(x)[0])

if (np.shape(timeBegVoicSamp)[0] == np.shape(timeEndVoicSamp)[0]
    ):  # Check if number of instants are same
    for i in range(np.shape(timeBegVoicSamp)[0]):
        begInst = timeBegVoicSamp[i]  # Get beg instant
        endInst = timeEndVoicSamp[i]  # Get end instant
        voicPitch[begInst:endInst] = tempF0[begInst:endInst]  # Copy F0 into the voiced region
Example #32
def loaddata2(input_dir,
              ref_dir,
              sigmoid=5,
              do_normalize=True,
              denoise=False,
              fs=360):
    X = []
    Y = []
    QRS = []

    datapaths = glob.glob(os.path.join(input_dir, '*.mat'))
    datapaths.sort()
    for data_path in datapaths:

        #print(data_path)

        # load data
        data_mat = sio.loadmat(data_path)
        data = data_mat['ecg']
        data = np.array(data, dtype=np.float32)
        #         print(data.shape)

        for i in range(data.shape[1]):
            smoothed_signal = smooth.smooth(data[:, i],
                                            window_len=int(fs),
                                            window='flat')
            data[:, i] = data[:, i] - smoothed_signal

        # denoise ECG
        for i in range(data.shape[1]):
            # DWT
            coeffs = pywt.wavedec(data[:, i], 'db4', level=3)
            # compute threshold
            noiseSigma = 0.01
            threshold = noiseSigma * math.sqrt(2 * math.log2(data[:, i].size))
            # apply threshold
            newcoeffs = coeffs
            for j in range(len(newcoeffs)):
                newcoeffs[j] = pywt.threshold(coeffs[j],
                                              threshold,
                                              mode='soft')

            # IDWT
            data[:, i] = pywt.waverec(newcoeffs, 'db4')[0:len(data)]

        # normalize the data
        # data = scale(data).astype(np.float32)

        # normalize the data
        if do_normalize:
            scaler = StandardScaler()
            scaler.fit(data)
            data = scaler.transform(data).astype(np.float32)

        X.append(data)

    X = np.array(X, dtype=np.float32)
    print('X.shape', X.shape)

    refpaths = glob.glob(os.path.join(ref_dir, '*.mat'))
    refpaths.sort()
    for ref_path in refpaths:

        # load reference
        ref_mat = sio.loadmat(ref_path)
        R_peaks = ref_mat['R_peak']
        R_peaks = np.array(R_peaks, dtype=np.int32).squeeze()
        ref = smooth_ref(len(data), R_peaks, sigmoid=sigmoid)
        ref = np.expand_dims(ref, axis=-1)
        Y.append(ref)
        QRS.append(R_peaks)

    Y = np.array(Y, dtype=np.float32)

    print('Y.shape', Y.shape)

    # remove duplicate rows
    #X,indexes = np.unique(X,  return_index=True, axis=0)
    #print(X.shape)
    # Y = Y[indexes]
    # QRS = itemgetter(*indexes)(QRS)

    # shuffle
    #     X, Y, QRS = shuffle(X, Y, QRS, random_state=0)

    return X, Y, QRS
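The first per-lead loop removes baseline wander by subtracting a one-second moving average (window_len = fs with a 'flat' window), i.e. a crude high-pass. A toy version of that step, again assuming a plain moving average stands in for smooth.smooth:

import numpy as np

fs = 360                                    # sampling rate in Hz
t = np.arange(10 * fs) / fs
ecg_like = np.sin(2 * np.pi * 8 * t)        # stand-in for the ECG content
wander = 0.5 * np.sin(2 * np.pi * 0.2 * t)  # slow baseline drift
signal = ecg_like + wander

baseline = np.convolve(signal, np.ones(fs) / fs, mode='same')  # ~1 s moving average
detrended = signal - baseline

# the slow drift is largely removed while the 8 Hz content survives
assert np.abs(detrended - ecg_like).std() < wander.std()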
Example #33
def compareTG(data, plot=False, save_csv=False, debug=False, debug_plot=False):
    """
    Does a comprehensive comparison between tide gauge height data and
    modeled data.

    Input:
       - data = dictionary containing all necessary tide gauge and model data.
    Outputs
       - elev_suite = dictionary of useful statistics
    Options:
       - plot = boolean flag for plotting results
       - save_csv = boolean flag for saving statistical benchmarks in csv file
    """
    if debug: print "CompareTG..."
    # load data
    mod_elev = data['mod_timeseries']['elev']
    obs_elev = data['obs_timeseries']['elev']
    obs_datenums = data['obs_time']
    mod_datenums = data['mod_time']
    gear = data['type']  # Type of measurement gear (drifter, adcp,...)
    #TR: comment out
    #mod_harm = data['elev_mod_harmonics']

    # Save path & create folder
    name = data['name']
    save_path = name.split('/')[-1].split('.')[0] + '/'
    while exists(save_path):
        save_path = save_path[:-1] + '_bis/'
    mkdir(save_path)

    # convert times and grab values
    obs_time, mod_time = [], []
    for i, v in enumerate(obs_datenums):
        obs_time.append(dn2dt(v))
    for j, w in enumerate(mod_datenums):
        mod_time.append(dn2dt(w))

    if debug: print "...check if they line up in the time domain..."
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):
        raise PyseidonError("---time periods do not match up---")

    else:

        if debug: print "...interpolate timeseries onto a common timestep..."
        (mod_elev_int, obs_elev_int, step_int, start_int) = \
            smooth(mod_elev, mod_time, obs_elev, obs_time,
                   debug=debug, debug_plot=debug_plot)

    elev_suite = tidalSuite(gear,
                            mod_elev_int,
                            obs_elev_int,
                            step_int,
                            start_int, [], [], [], [], [], [],
                            kind='elevation',
                            plot=plot,
                            save_csv=save_csv,
                            save_path=save_path,
                            debug=debug,
                            debug_plot=debug_plot)

    if debug: print "...CompareTG done."

    return elev_suite
        print 'x,y', x[0], x[1]
        u = self.u(x[0], x[1])
        return u != 0.


subdomains = CellFunction("size_t", mesh)
subdomains.set_all(0)
validdata = ValidData(u)
validdata.mark(subdomains, 1)
dx = Measure("dx")[subdomains]

# Smooth the surface and find flow directions based on
# Smoothed surface gradients
rho = 911
g = 9.81
Nx = smooth(Q, H, -rho * g * H * dSdx, kappa_i)
Ny = smooth(Q, H, -rho * g * H * dSdy, kappa_i)

# Replace smoothed surface slope direction with data where ice is fast enough
# to lower errors:
uhat = project(u / Unorm, Q)
vhat = project(v / Unorm, Q)
# Smooth these directions out too, but over fewer ice thicknesses
uhat = smooth(Q, H, rho * g * H * uhat, kappa_v)
vhat = smooth(Q, H, rho * g * H * vhat, kappa_v)

Nx.vector()[Unorm.vector() > goodU] = uhat.vector()[Unorm.vector() > goodU]
Ny.vector()[Unorm.vector() > goodU] = vhat.vector()[Unorm.vector() > goodU]

# Finally, smooth again to ease the transitions between data and surface slope
Nx = smooth(Q, H, rho * g * H * Nx, kappa_v)
Example #35
def conductGeneration(generation, corpus, previous_output):
        '''
        Conducts a generation of learning and testing on the input data

        Inputs
                generation (int) --- the number of the generation
                corpus (array) --- the lemmas and their info from reading the corpus file
                previous_output (dict) --- the output phonology of the previous generation
        Returns the output of the current generation--the expected outputs for the following generation
        '''

        # Build the right size network
        net = buildNetwork(input_nodes, constants.hidden_nodes, constants.output_nodes)

        # Build the right size training set
        emptytraining_set = SupervisedDataSet(input_nodes, constants.output_nodes)

        # Initialize corpus object and expected output dictionary
        training_corpus = objects.Corpus(emptytraining_set)

        # Iterate through tokens and convert to binary
        for lemma in corpus:

                # Iterate through cases
                for case, form in lemma.cases.iteritems():

                        # Add words according to their frequencies
                        training_corpus.addByFreq(constants.token_freq, form, previous_output[form.lemmacase])

        # Construct the training set
        print '''--------Generation %d--------''' % generation
        print '''----------Trial %s-----------
        Training on %d Epochs
        Vectors: %s
        Number of Input Nodes: %d
        Number of Hidden Nodes: %d
        Number of Output Nodes: %d
        Token Frequency taken into account: %s
        Language to Model: %s
        Secondary Sound Change (for Italian and Romanian) Implemented: %s\n''' % (
                constants.trial,
                constants.epochs,
                constants.vectors, 
                input_nodes,
                constants.hidden_nodes,
                constants.output_nodes,
                constants.token_freq,
                constants.language,
                bool(constants.secondsoundchange)
                )

        print "Constructing the training set"

        training_set = training_corpus.constructTrainingSet()

        # Construct the trainer
        trainer = BackpropTrainer(net, training_set)

        # Train
        print "Training the model"

        error = trainer.trainEpochs(constants.epochs)
        
        print "Number of Tokens in Training Set: %s" % len(training_set)

        results = {}

        # For each word in the test set, calculate output tuple
        print "Running the test set"

        # Counter to count correct
        ncorrect = 0
        tot_phon = 0

        for (form, input_tuple, expected_output) in training_corpus.test:

                # Activate the net, and smooth the output
                result = smooth(tuple(net.activate(input_tuple)))

                # Append output tuple to result
                results[form.lemmacase] = result

                # Hash the output tuple to get the phonological form result
                new_phonology = ''

                # Divide tuple into chunks (each 11 units, representing one phoneme)
                chunked_list = list(chunks(list(result), constants.n_feat))
                # Divide previous output tuple into chunks
                chunked_prev = list(chunks(list(previous_output[form.lemmacase]), constants.n_feat))

                for phon_index in range(len(chunked_list)):
                        phoneme = chunked_list[phon_index]
                        prev_phoneme = chunked_prev[phon_index]

                        new_phonology += constants.feat_to_phon[tuple(phoneme)]
                        # If phoneme matches, add to number correct
                        if prev_phoneme != [0.0]*constants.n_feat:
                                tot_phon += 1
                                if phoneme == prev_phoneme:
                                        ncorrect += 1

                # Output for this generation is new suffix
                new_suf = ''.join(new_phonology)
                for seq in functions.to_revert.keys():
                        if seq in new_suf:
                            new_suf = new_suf.replace(seq, functions.to_revert[seq])
                new_suf = new_suf.replace('-', '')
                if new_suf == '': new_suf = '-'

                form.output_change[generation] = new_suf

                print form.lemmacase, form.root+form.suffix, form.changed_suf, form.parent.declension, form.parent.gender, form.parent.totfreq, form.root+new_suf, new_suf

        print "Results have been determined"
        try: 
            print "Percentage correct in test run: %f" % round(float(ncorrect)/float(tot_phon)*100, 2)
        except ZeroDivisionError: print "Percentage correct in test run: 0.00"

        return results
Example #36
        if name == line[0:9].replace(' ', ''):
            fluxline = [
                float(line[i:i + 10]) * 1.e13
                for i in range(10,
                               len(line) - 1, 10)
            ]
            flux += fluxline
    data[name] = flux

# Get list of star names
starnames = []
fn = open('jacoby-list.rasort', 'r')
lines = fn.readlines()
for line in lines:
    starname = line.split()[1]
    starnames.append(starname)
fn.close()

# Set initial wavelength
lambda1 = 351.0 - 1.3  # Determined from Balmer lines

# Create plot and CSV files
for star in starnames:
    if star in data:
        y = np.array(data[star])
        y = smooth.smooth(y, window_len=21, window='hanning')
        x = 0.14 * np.arange(len(y))
        x += lambda1
        plot_star(x, y)
        write_csv(x, y)
                         xmin=xmin,
                         xmax=xmax)
    test = prepare_data(filename_format,
                        cases_test,
                        channels,
                        qctimes_test,
                        xmin=xmin,
                        xmax=xmax)
else:
    train = prepare_data(filename_format, cases_train, channels, qctimes_train)
    test = prepare_data(filename_format, cases_test, channels, qctimes_test)

if data_suffix in ['ctc', 'ctc2']:
    for ibatch in range(train['nbatches']):
        train['Xdata'][ibatch,:,:,4] = smooth(\
            train['Xdata'][ibatch,:,:,4],\
            np.isfinite(train['Xdata'][ibatch,:,:,4]), (3,3))
    for ibatch in range(test['nbatches']):
        test['Xdata'][ibatch,:,:,4] = smooth(\
            test['Xdata'][ibatch,:,:,4],\
            np.isfinite(test['Xdata'][ibatch,:,:,4]), (3,3))

if data_suffix == 'C13':
    print('zero out C07, C09, GLM')
    train['Xdata'][:, :, :, 0] = 0.  #zero out C07
    train['Xdata'][:, :, :, 1] = 0.  #zero out C09
    train['Xdata'][:, :, :, 3] = 0.  #zero out GLM
    test['Xdata'][:, :, :, 0] = 0.  #zero out C07
    test['Xdata'][:, :, :, 1] = 0.  #zero out C09
    test['Xdata'][:, :, :, 3] = 0.  #zero out GLM
def conductGeneration(generation, corpus, previous_output):
    '''
        Conducts a generation of learning and testing on the input data

        inputs
                generation (int) --- the number of the generation
                corpus (array) --- the lemmas and their info from reading the corpus file
                previous_output (dict) --- the output (gender, declension, case, number) of the previous generation
        Returns the output of the current generation--the expected outputs for the following generation

        '''

    # Build the right size network
    net = buildNetwork(constants.input_nodes, constants.hidden_nodes,
                       constants.output_nodes)

    # Build the right size training set
    emptytraining_set = SupervisedDataSet(constants.input_nodes,
                                          constants.output_nodes)

    # Initialize corpus object and expected output dictionary
    training_corpus = objects.Corpus(emptytraining_set)

    # Iterate through tokens and convert to binary
    for lemma in corpus:
        # JUST SKIP THOSE THAT ARE DEFECTIVE
        if len(lemma.cases.keys()) < 6:
            continue

        # Iterate through cases
        for case, form in lemma.cases.iteritems():
            # Get new input from previous output
            new_gender, new_dec, new_case, new_num, prev_output = form.output_change[
                generation - 1]

            # Use new input as new syllables
            new_syllables = lemma.cases[new_case + new_num].syllables

            # Append to input change
            form.input_change[generation] = (new_case + new_num,
                                             ''.join(new_syllables).replace(
                                                 '-', ''))

            # print form.lemmacase, form.input_change[generation]

            # Create the input tuple
            form.createInputTuple(new_syllables)

            # Add words according to their frequencies
            training_corpus.addByFreq(constants.token_freq, form,
                                      expected_outputs[form.lemmacase])

    # Print information
    print "--------Generation %s--------" % generation
    if generation >= constants.gnvdrop_generation:
        print "Genitive Case Dropped"

    # Construct the training set
    print "Constructing the training set"
    training_set = training_corpus.constructTrainingSet()

    # Construct the trainer
    trainer = BackpropTrainer(net, training_set)

    # Train
    print "Training the model"
    if constants.epochs == 1:
        error = trainer.train()
    else:
        error = trainer.trainEpochs(constants.epochs)
    print "Number of Tokens in Training Set: %s" % len(training_set)
    print "Training Error: %s" % error

    results = {}

    # For each word in the test set
    print "Running the test set"
    for (form, input_tuple, expected_output) in training_corpus.test:

        # Determine if we should drop the genitive
        drop_gen = generation >= constants.gnvdrop_generation

        # Activate the net, and smooth the output
        result = smooth(tuple(net.activate(input_tuple)),
                        gendrop=drop_gen,
                        hierarchy=constants.hierarchy)

        # Append output tuple to result
        results[form.lemmacase] = result

        # Hash the output tuple to get the result
        gender = constants.tup_to_gen[tuple(
            result[constants.gen_b:constants.dec_b])]
        dec = constants.tup_to_dec[tuple(
            result[constants.dec_b:constants.case_b])]
        case = constants.tup_to_case[tuple(
            result[constants.case_b:constants.num_b])]
        num = constants.tup_to_num[tuple(result[constants.num_b:])]
        output = form.parent_lemma.cases[case + num].phonology

        # Set input change once we figure out how to deal with the phonology
        form.output_change[generation] = (gender, dec, case, num, output)

    print "Results have been determined"

    return results
Example #39
    error = trainer.trainEpochs(constants.epochs)

    print "Number of Tokens in Training Set: %s" % len(training_set)

    results = {}

    # For each word in the test set, calculate output tuple
    print "Running the test set"

    # Counter to count correct
    ncorrect = 0

    for (form, input_tuple, expected_output) in training_corpus.test:

        # Activate the net, and smooth the output
        result = smooth(tuple(net.activate(input_tuple)), suf_to_tup)

        # Append output tuple to result
        results[form.lemmacase] = result

        # Add to ncorrect if matches previous
        if result == previous_output[form.lemmacase]: ncorrect += 1

        # Hash the output tuple to get the suffix result
        new_suffix = suffix_dict[result]

        print form.lemmacase, form.root + form.suffix, form.parent.declension, form.parent.gender, form.parent.totfreq, form.root + new_suffix, new_suffix

        # Output for this generation and input for next
        # Combine new suffix with root to get new phonological form for next generation
        form.input_phon[generation + 1] = form.root + new_suffix
Example #40
        tss.add(HTSeq.GenomicPosition(chr, pos, strand))
    ifp2.close()
    profile = numpy.zeros(2 * tssdis + 1, dtype='i')
    for p in tss:
        promoter = HTSeq.GenomicInterval(p.chrom, p.pos - tssdis,
                                         p.pos + tssdis + 1, '.')
        if promoter.start < 0 or promoter.end > genomesize[p.chrom]:
            continue
        wincvg = numpy.fromiter(coverage[promoter],
                                dtype='i',
                                count=2 * tssdis + 1)
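        # add this promoter's coverage window; minus-strand windows are reversed so all profiles share the same orientation relative to the TSS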
        if p.strand == '+':
            profile += wincvg
        else:
            profile += wincvg[::-1]
#	mydata.append(profile/1e6)
    y = smooth.smooth(profile / float(myitem[-1]) * 1e6, 1000)
    pyplot.plot(x, y, myitem[2] + '-', lw=2, label=myitem[3])

ifp1.close()
pyplot.legend(prop={'size': 8})
pyplot.xlim(-1510, 1510)
pyplot.xticks([-1500, -1000, -500, 0, 500, 1000, 1500],
              ('-1500', '-1000', '-500', '0', '500', '1000', '1500'))
pyplot.xlabel('Distance from center (bp)')
pyplot.ylabel('Average coverage (reads per million)')
pyplot.title(sys.argv[6])
#pyplot.show()
pyplot.savefig(sys.argv[6] + '.' + sys.argv[5] + '.smooth.png')
pyplot.savefig(sys.argv[6] + '.' + sys.argv[5] + '.smooth.eps')
Beispiel #41
0
        y_sample_used = torch.from_numpy(y_sample_used).float().to(device)
        y_shuffle_used = torch.from_numpy(y_shuffle_used).float().to(device)

        pred_xy_de = model_de(x_sample_used, y_sample_used)
        pred_x_y_de = model_de(x_sample_used, y_shuffle_used)

        ret_de = torch.mean(pred_xy_de) - torch.log(
            torch.mean(torch.exp(pred_x_y_de)))
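        # ret_de is the Donsker-Varadhan (MINE-style) lower bound on mutual information: E[T(x, y)] - log E[exp(T(x, y'))], with y' the shuffled (marginal) batch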
        loss_de = -ret_de  # maximize
        plot_loss_de.append(loss_de.to('cpu').item())
        model_de.zero_grad()
        loss_de.backward()
        optimizer_de.step()

    plot_y_de = np.array(plot_loss_de).reshape(-1, )
    smooth_mi_de = smooth(plot_y_de)

    trans_all = np.load(trans_data_dir + item[:-4] + '.npz')
    x_sample_trans = trans_all['eeg']
    y_sample_trans = trans_all['eye']
    model_trans = Net().to(device)
    optimizer_trans = torch.optim.Adam(model_trans.parameters(), lr=0.001)
    plot_loss_trans = []

    for epoch in tqdm(range(n_epoch)):
        number_perm = torch.randperm(1823)
        idx = number_perm[:500]
        x_sample_used_trans = x_sample_trans[idx]
        y_sample_used_trans = y_sample_trans[idx]
        y_shuffle_used_trans = np.random.permutation(y_sample_used_trans)
Beispiel #42
0
def conductGeneration(generation, corpus, previousOutput):
    '''
        Conducts a generation of learning and testing on the input data

        inputs
                generation (int) --- the number of the generation
                corpus (array) --- the output of reading the corpus file
                previousOutput (dict) --- the output of the previous generation
        outputs

        
        '''

    print "Trial %s" % str(constants.trial)
    input_size = constants.inputNodes

    # if we're using Slavic data, modify the expected size of the input vector.
    if constants.includeSlavic and generation >= constants.generationToIntroduceSlavic:
        input_size = constants.inputNodesSlav

    # build the right size network
    net = buildNetwork(input_size, constants.hiddenNodes,
                       constants.outputNodes)

    # build the right size training set
    emptyTrainingSet = SupervisedDataSet(input_size, constants.outputNodes)

    # initialize corpus object
    trainingCorpus = objects.Corpus(emptyTrainingSet)

    # iterate through tokens passed to the function
    for token in corpus:

        # iterate through cases
        for (case, word) in token.cases:
            # set its syllables, based on the generation (i.e. account for sound changes)
            word.setSyllables(generation, word.syllables)
            # extract the gender from the previous generation
            print previousOutput
            (placeholder, previousResult) = previousOutput[[
                wordinfo for (wordinfo, gender) in previousOutput
            ].index(word.description)]
            # print previousResult
            # print placeholder # we already know the word
            # adds words according to their frequencies
            trainingCorpus.configure(word, previousResult, generation)

    # construct the training set
    trainingSet = trainingCorpus.constructTrainingSet()

    # construct the trainer
    trainer = BackpropTrainer(net, trainingSet)

    # train
    if constants.epochs == 1:
        error = trainer.train()
    else:
        error = trainer.trainEpochs(constants.epochs)

    print "--------Generation: %s--------" % generation
    if generation >= constants.generationToDropGen:
        print "Genitive Case Dropped"

    if constants.includeSlavic and generation >= constants.generationToIntroduceSlavic:
        print "Slavic Information Introduced"

    print "Number of Training Epochs: %s" % constants.epochs
    print "Number of Training Tokens: %s" % len(trainingSet)
    print "Training Error: %s" % error

    results = []

    # Dictionary of changes
    changes = {
        'total': 0,
        'gen_change': defaultdict(lambda: 0),
        'dec_change': defaultdict(lambda: 0),
        'gencase_change': defaultdict(lambda: 0),
        'gennum_change': defaultdict(lambda: 0),
        'deccase_change': defaultdict(lambda: 0),
        'decnum_change': defaultdict(lambda: 0),
        'gencasenum_change': defaultdict(lambda: 0),
        'deccasenum_change': defaultdict(lambda: 0)
    }
    # for each word in the input
    for (word, inputTuple, expectedOutput, trueLatinGender,
         trueRomanianGender) in trainingCorpus.test:

        # Count how many tokens are in the test set
        counterBag.totalCounter.increment()

        # determine if we should drop the genitive
        should_drop_gen = generation >= constants.generationToDropGen

        # activate the net, and smooth the output
        result = smooth(tuple(net.activate(inputTuple)),
                        gendrop=should_drop_gen,
                        equalcase=True)

        # append output tuple to result
        results.append((word.description, result))

        # If this is the first generation
        if counterBag.generationCounter.value == 1:
            # add
            genchange[word.description].append(
                (0, word.parentToken.latinGender[0]))

        # Change the indices depending on whether the genitive has been dropped
        (gen_b, gen_e, dec_b, dec_e, case_b, case_e, num_b,
         num_e) = (0, 3, 3, 8, 8, 11, 11, 13)

        # hash the output tuple to get the result
        gender = constants.tup_to_gen[tuple(result[gen_b:gen_e])]
        declension = constants.tup_to_dec[tuple(result[dec_b:dec_e])]
        case = constants.tup_to_case[tuple(result[case_b:case_e])]
        num = constants.tup_to_num[tuple(result[num_b:num_e])]

        to_add = (counterBag.generationCounter.value,
                  gender + declension + case + num,
                  word.parentToken.latinGender[0], word.parentToken.declension,
                  word.case, word.num)

        genchange[word.description].append(to_add)
        word.genchange[counterBag.generationCounter.value] = (gender,
                                                              declension, case,
                                                              num)

    return results
Beispiel #43
0
            data, airmass, Object, jd, parallactic_angle, mean_seeing = fromObs(file)       
            pwv = 0.
        if (inputype=='simu'):
            data, airmass, Object, jd, parallactic_angle, pwv = fromSimu(file)


        wmin = 360.
        wmax = 1000.
        data = data[(data[:,0]>= wmin) & (data[:,0]<= wmax)]
   
        ''' 
        Smoothing the atmospheric curve to match the resolution of the observation
        '''
        if ((inputype=='simu') and (seeing is not None)):
             sm   = smooth.smooth(data[:,0], data[:,1], seeing_x, seeing_y,
                                      pix2wgth = pix2wght,
                                      plot = plot)
             data[:,0], data[:,1] = sm.smoothCurve()
        if seeing is None and inputype == 'simu':
            mean_seeing = 0.
   

    
        '''
        --> Fitting a polynomial on the edges
        --> measurement of the EW
        --> +/- 10 nm sliding
        --> picking the max
        --> fitting a parabola on data-continuum and retrieving the minimum
        '''
        for i in EW:
Beispiel #44
0
def conductGeneration(generation, corpus, previous_output):
    '''
        Conducts a generation of learning and testing on the input data

        Inputs
                generation (int) --- the number of the generation
                corpus (array) --- the lemmas and their info from reading the corpus file
                previous_output (dict) --- the output phonology of the previous generation
        Returns the output of the current generation--the expected outputs for the following generation
        '''

    # Build the right size network
    net = buildNetwork(input_nodes, constants.hidden_nodes,
                       constants.output_nodes)

    # Build the right size training set
    emptytraining_set = SupervisedDataSet(input_nodes, constants.output_nodes)

    # Initialize corpus object and expected output dictionary
    training_corpus = objects.Corpus(emptytraining_set)

    # Iterate through tokens and convert to binary
    for lemma in corpus:

        # Iterate through cases
        for case, form in lemma.cases.iteritems():

            # Add words according to their frequencies
            training_corpus.addByFreq(constants.token_freq, form,
                                      previous_output[form.lemmacase])

    # Construct the training set
    print "--------Generation %s--------" % generation
    print "Constructing the training set"
    training_set = training_corpus.constructTrainingSet()

    # Construct the trainer
    trainer = BackpropTrainer(net, training_set)

    # Train
    print "Training the model"

    error = trainer.trainEpochs(constants.epochs)

    print "Number of Tokens in Training Set: %s" % len(training_set)

    results = {}

    # For each word in the test set, calculate output tuple
    print "Running the test set"

    # Counter to count correct
    ncorrect = 0

    for (form, input_tuple, expected_output) in training_corpus.test:

        # Activate the net, and smooth the output
        result = smooth(tuple(net.activate(input_tuple)), suf_dict)

        # Append output tuple to result
        results[form.lemmacase] = result

        # Add to ncorrect if matches previous
        if result == previous_output[form.lemmacase]: ncorrect += 1

        # Hash the output tuple to get the suffix result
        new_suf = inv_suf[result]

        form.output_change[generation] = new_suf

        print form.lemmacase, form.root + form.suffix, form.parent.declension, form.parent.gender, form.parent.totfreq, form.root + new_suf, new_suf

    print "Results have been determined"
    print "Percentage correct in test run: %f" % round(
        float(ncorrect) / float(len(previous_output)) * 100, 2)

    return results
Beispiel #45
0
#!/usr/bin/env python

import numpy as np
from smooth import smooth
import pylab
from scipy.optimize import curve_fit

#
hist0 = np.load("anglespectrum0.npy")
hist180 = np.load("anglespectrum180.npy")
hist0 = smooth(hist0)[:hist0.size]
hist180 = smooth(hist180)[:hist180.size]

# fft
iq0 = np.fft.fft(hist0)
iq180 = np.fft.fft(hist180)
# corr = iq0 * np.conjugate(iq180)
corr = iq180 * np.conjugate(iq0)
corr /= np.abs(corr)
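# keeping only the phase makes this a phase correlation: the inverse FFT peaks at the circular shift between the two spectra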
r = np.fft.ifft(corr)
#
r = np.real(r)

# the argmax of r should be what we want.
# - only data within a few degrees are useful
r[10:350] = 0
index = np.argmax(r[1:]) + 1
width = 2
peak = r[index - width:index + width + 1]

Beispiel #46
0
def videoStabilizationMovement(data_path, blockSize, areaOfSearch, Backward,
                               ID):

    print '------Video Stabilization--------'

    # List all the files in the sequence dataset
    frames_list = sorted(os.listdir(data_path))
    nFrames = len(frames_list)

    frame_dir_0 = os.path.join(data_path, frames_list[0])
    first_frame = cv2.imread(frame_dir_0, 0)

    motionVectors = np.zeros(
        [nFrames, first_frame.shape[0], first_frame.shape[1], 2])

    print 'Motion estimation step'
    for idx in range(0, nFrames - 1):

        print '     --> Analyzing frame ', frames_list[idx], '...'
        frame_dir_curr = os.path.join(data_path, frames_list[idx])
        frame_dir_next = os.path.join(data_path, frames_list[idx + 1])

        if Backward:
            toExplore_img = cv2.imread(frame_dir_curr, 0)
            curr_img = cv2.imread(frame_dir_next, 0)

        else:
            curr_img = cv2.imread(frame_dir_curr, 0)
            toExplore_img = cv2.imread(frame_dir_next, 0)

        #OF_image = calcOpticalFlowBM(curr_img, toExplore_img, blockSize, areaOfSearch)
        OF_image = cv2.calcOpticalFlowFarneback(curr_img,
                                                toExplore_img,
                                                pyr_scale=0.5,
                                                levels=3,
                                                winsize=30,
                                                iterations=5,
                                                poly_n=5,
                                                poly_sigma=1.2,
                                                flags=0)
        # showOpticalFlowHSVBlockMatching(OF_image, frames_list[idx], saveResult=True, visualization = 'HS')
        motionVectors[idx, :, :, 0:2] = OF_image

    point = [138, 83]  # Reference point from which the sequence is stabilized
    plotMotionEstimation(motionVectors, point, name='beforeSmoothing')
    motionVectors[:, 138, 83, 0] = smooth(motionVectors[:, 138, 83, 0],
                                          window_len=1)
    motionVectors[:, 138, 83, 1] = smooth(motionVectors[:, 138, 83, 1],
                                          window_len=1)
    plotMotionEstimation(motionVectors, point, name='afterSmoothing')

    print ' -->Motion estimation completed!'

    print 'Motion compensation step'
    for idx in range(0, nFrames):

        print '     --> Compensating frame ', frames_list[idx], '...'
        frame_dir = os.path.join(data_path, frames_list[idx])

        toCompensate = cv2.imread(frame_dir, 1)
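        # translate the frame by the negative of the reference-point motion so that point stays fixed across the sequence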
        M = np.float32([[1, 0, -motionVectors[idx][138, 83, 0]],
                        [0, 1, -motionVectors[idx][138, 83, 1]]])
        compensated = cv2.warpAffine(
            toCompensate, M, (first_frame.shape[1], first_frame.shape[0]))
        cv2.imwrite('results/Stabilized/' + ID + '/input/' + frames_list[idx],
                    compensated)

    np.save('results/Stabilized/' + ID + '/motionVectors', motionVectors)
    np.save('results/Stabilized/' + ID + '/point', point)
    return motionVectors, point
Beispiel #47
0
def conductGeneration(generation, corpus, previous_output):
        '''
        Conducts a generation of learning and testing on the input data

        Inputs
                generation (int) --- the number of the generation
                corpus (array) --- the lemmas and their info from reading the corpus file
                previous_output (dict) --- the output phonology of the previous generation
        Returns the output of the current generation--the expected outputs for the following generation
        '''
        # Build the right size network
        net = buildNetwork(input_nodes, constants.hidden_nodes, constants.output_nodes)

        # Build the right size training set
        emptytraining_set = SupervisedDataSet(input_nodes, constants.output_nodes)

        # Initialize corpus object and expected output dictionary
        training_corpus = objects.Corpus(emptytraining_set)

        # Iterate through tokens and convert to binary
        for lemma in corpus:

                # Iterate through cases
                for case, form in lemma.cases.iteritems():

                        # Create the input tuple
                        form.createInputTuple(input_nodes, root_size)

                        # Add words according to their frequencies
                        training_corpus.addByFreq(constants.token_freq, form, previous_output[form.lemmacase])

        # Print information
        print "--------Generation %s--------" % generation
        # if generation >= constants.gnvdrop_generation:
        #         print "Genitive Case Dropped"

        # Construct the training set
        print "Constructing the training set"
        training_set = training_corpus.constructTrainingSet()

        # Construct the trainer
        trainer = BackpropTrainer(net, training_set)

        # Train
        print "Training the model"
        
        error = trainer.trainEpochs(constants.epochs)
        
        print "Number of Tokens in Training Set: %s" % len(training_set)
        print "Training Error: %s" % error

        results = {}

        # For each word in the test set, calculate output tuple
        print "Running the test set"

        # Counter to count correct. Exclude -'s from total phonemes
        ncorrect = 0
        tot_phon = 0

        for (form, input_tuple, expected_output) in training_corpus.test:             

                # Determine if we should drop the genitive
                drop_gen = generation >= constants.gnvdrop_generation

                # Activate the net, and smooth the output
                result = smooth(tuple(net.activate(input_tuple)), gendrop=drop_gen, hierarchy=constants.hierarchy)  

                # Append output tuple to result
                results[form.lemmacase] = result

                # Hash the output tuple to get the phonological form result
                new_phonology = ''
                
                # Divide tuple into chunks (each 12 units, representing one phoneme)
                chunked_list = list(chunks(list(result), 12))
                # Divide previous output tuple into chunks
                chunked_prev = list(chunks(list(previous_output[form.lemmacase]), 12))

                for phon_index in range(len(chunked_list)):
                        phoneme = chunked_list[phon_index]
                        prev_phoneme = chunked_prev[phon_index]

                        new_phonology += constants.feat_to_phon[tuple(phoneme)]
                        # If phoneme matches, add to number correct
                        if prev_phoneme != [0.5]*12:
                                tot_phon += 1
                                if phoneme == prev_phoneme: 
                                        ncorrect += 1

                print form.lemmacase, form.parent.declension, form.parent.gender, new_phonology

                # Record the output change for this generation (feeding it back as input awaits proper phonology handling)
                form.output_change[generation] = new_phonology.replace('-', '')

        print "Results have been determined"

        print "Percentage correct in test run: {:.2f}".format(float(ncorrect)/float(tot_phon)*100)

        return results
Beispiel #48
0
def iter_smooth(array, loops=6, window_len=3):
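    # apply sm.smooth repeatedly; each pass convolves with the window again, widening the effective smoothing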
    for l in np.arange(loops):
        array = sm.smooth(array, window_len=window_len)
    return array
Beispiel #49
0
	d = d/max(d) + 0.1*noise
	'''
    #====================================================================================
    d = dMat[count, :]

    nbin = np.size(d)
    print 'size(d):', np.size(d)
    '''
	plt.figure(20)
	plt.plot(x,d,'-')
	plt.figure(21)
	plt.plot(x,model,'-')
	plt.show()
	'''
    # shifting the peak to center
    peak = np.argmax(smooth(d, 5))
    d = np.roll(d, (int(nbin * 0.1) - peak))
    peak = np.argmax(smooth(d, 5))

    #ON_Off Pulse regions
    #j3 = peak - int(0.25*nbin)#/((count+1)**0.5))
    j2 = peak + int(0.25 * nbin)  #/((count+1)**0.5))
    j3 = 1
    print 'Bin Selection Range', j2, j3

    # REMOVING Pedestal
    mean0 = np.median(d)
    d = d - mean0
    d = d / np.max(d)
    #==================Off and On Pulse region==============================
    offpulse2 = d[j2:(nbin - 1)]
Beispiel #50
0
def prep_beam(binfile, matfile, stations_model, ndays=1,  nhours=1, fsamp=10., threshold_std=0.5, onebit=False,
              tempfilter=False, specwhite=True, timenorm=False, fact=10, new=False,
              fftpower=7, freq_int=(0.02, 0.4)):
    """
    Prepare the raw data before the beamforming. This comprises bandpass
    filtering, cutting the data into smaller chunks, removing the mean and
    down-weighting strong transient signals from for example earthquakes. One
    can choose between several methods to remove transients: 1-bit normalization,
    time domain normalization, which computes a smoothed traces of the absolute
    amplitude and down-weights the original trace by this smoothed trace, and a
    threshold based method that clips the trace at a predefined factor of the
    trace's standard deviation. Note that currently no instrument correction is
    applied so the input traces either have to be already corrected for the
    instrument response, they need to be filtered within a frequency band for
    which all instrument responses are flat or they need to be all from the
    same instruments.

    :param binfile: Day-long vertical component traces, one per station.
    :param matfile: Name of the file to which the pre-processed traces are
                    written. This saves the time of repeatedly running the
                    pre-processing for beamformer runs with different parameters.
                    The output file is a *.mat file that can also be read with
                    Matlab.
    :param stations_model: Station objects supplying station, lat and lon attributes.
    :param nhours: Input data is cut into chunks of length of nhours.
    :param fsamp:  Sampling frequency of the input data.
    :param threshold_std: Clipping factor; values greater than
                          threshold_std * std(trace) are set to threshold_std.
    :param onebit: Turn on/off 1-bit normalization
    :param tempfilter: Turn on/off threshold based clipping.
    :param specwhite: Turn on/off spectral whitening (only retain spectral phase
                      and set spectral amplitude to one).
    :param timenorm: Turn on/off time domain normalization.
    :param fact: Decimation factor.
    :param new: If set to false it will try to load all return values from
                matfile. If true it will compute all return values from scratch.
    :param fftpower: Length of data chunks cut before the FFT.
    :param freq_int: Frequency interval for the band-pass filter.

    :return fseis: Pre-processed traces in the frequency domain.
    :return freqs: Frequency array corresponding to fseis
    :return slats: Station latitudes.
    :return slons: Station longitudes.
    :return dt: Sampling interval after decimation.
    :return seissmall: Pre-processed traces in the time domain.
    """
    if new:
        print (' >> Prepare raw data before beamforming...')
        ntimes = int(ndays) * int(round(24 / nhours))
        step = int( nhours * 3600 * fsamp / fact )
        stations = []
        slons = array([])
        slats = array([])
        nfiles = np.shape(binfile)[0]
        seisband = zeros((nfiles, ntimes, step))
        freqs = fftfreq(2 ** fftpower, 1. / (fsamp / fact))
        for i, (t, s) in enumerate(zip(binfile,stations_model)):
            data = t
            print (i, s.station)
            slons = append(slons, s.lon)
            slats = append(slats, s.lat)
            stations.append(s.station)
            data -= data.mean()
            data = bandpass(data, freqmin=freq_int[0], freqmax=freq_int[1], df=fsamp,
                   corners=4, zerophase=True)
            if fact != 1:
                data = integer_decimation(data, fact)
            npts = len(data) 
            df = fsamp / fact  
            dt = 1./df 
            seis0 = zeros(ndays * 24 * 3600 * int(df))
            istart = int(round(((UTCDateTime(0).hour * 60 + UTCDateTime(0).minute) * 60\
                          + UTCDateTime(0).second) * df))
            if timenorm:
                smoothdata = smooth(abs(data), window_len=257, window='flat')
                data /= smoothdata
            if np.isnan(data).any():
                data = zeros(ndays * 24 * 3600 * int(df))
            try:
                seis0[istart:(istart + npts)] = data
            except ValueError:
                print ('Problem with %s'%s.station )
                raise ValueError

            seis0 -= seis0.mean()
            # iterate over nhours
            for j in np.arange(ntimes):
                ilow = j * step
                iup = (j + 1) * step
                seisband[i, j, :] = seis0[ilow:iup]
                seisband[i, j, :] -= seisband[i, j, :].mean()
                if onebit:
                    seisband[i, j, :] = sign(seisband[i, j, :])
            if tempfilter:
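                # threshold clipping: samples larger than threshold_std times the RMS of the per-chunk standard deviations are clipped, keeping their sign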
                sigmas = seisband[i, :, :].std(axis=1, ddof=1)
                sgm = ma.masked_equal(array(sigmas), 0.).compressed()
                sigma = sqrt(sum(sgm ** 2) / sgm.size)
                threshold = threshold_std * sigma
                seisband[i] = where(abs(seisband[i]) > threshold, threshold * sign(seisband[i]), seisband[i])
                seisband[i] = apply_along_axis(lambda e: e - e.mean(), 1, seisband[i])

        ismall = 2 **fftpower
        ipick = arange(ismall)
        taper = cosine_taper(len(ipick))
        n = ndays * nhours * 3600 * df
        nsub = int(np.floor(n / ismall))  # Number of time pieces -20 mins long each
        seissmall = zeros((nfiles, ntimes, nsub, len(ipick)))
        for ii in np.arange(nfiles):
            for jj in np.arange(ntimes):
                for kk in np.arange(nsub):
                    seissmall[ii, jj, kk, :] = seisband[ii, jj, kk * ismall + ipick] * taper
        fseis = fft(seissmall, n=2**fftpower, axis=3)
        if np.isnan(fseis).any():
            print ("NaN found")
            return
        ind = np.where((freqs > freq_int[0]) & (freqs < freq_int[1]))[0]
        fseis = fseis[:, :, :, ind]
        if specwhite:
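            # spectral whitening: discard the amplitude spectrum and keep only the phase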
            fseis = exp(angle(fseis) * 1j)

        sio.savemat(matfile, {'fseis':fseis, 'slats':slats, 'slons':slons, 'dt':dt, 
                              'freqs':freqs[ind]})
        return fseis, freqs[ind], slats, slons, dt
    else:
        print (' >> Reading %s and passing preprocess...'%matfile)
        a = sio.loadmat(matfile)
        fseis = a['fseis']
        freqs = np.squeeze(a['freqs'])
        slats = np.squeeze(a['slats'])
        slons = np.squeeze(a['slons'])
        dt = a['dt'][0][0]
        return fseis, freqs, slats, slons, dt 
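
The docstring above offers three ways of down-weighting transients before beamforming (1-bit normalization, time-domain normalization, and threshold clipping). Below is a minimal numpy sketch of the three options applied to a single trace; the helper name deweight_transients is hypothetical and not part of this module:

import numpy as np

def deweight_transients(trace, method='clip', threshold_std=0.5, window_len=257):
    """Return a demeaned copy of trace with strong transients suppressed."""
    x = trace - trace.mean()
    if method == 'onebit':
        # 1-bit normalization: keep only the sign of each sample
        return np.sign(x)
    if method == 'timenorm':
        # time-domain normalization: divide by a smoothed envelope of the absolute amplitude
        env = np.convolve(np.abs(x), np.ones(window_len) / window_len, mode='same')
        return x / np.where(env > 0, env, 1.0)
    # threshold clipping: clip samples beyond threshold_std * std(x), keeping their sign
    thr = threshold_std * x.std(ddof=1)
    return np.where(np.abs(x) > thr, thr * np.sign(x), x)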
Beispiel #51
0
def compareUV(data,
              threeDim,
              depth=5,
              plot=False,
              save_csv=False,
              debug=False,
              debug_plot=False):
    """
    Does a comprehensive validation process between modeled and observed.
    Outputs a list of important statistics for each variable, calculated
    using the TidalStats class

    Inputs:
        - data = dictionary containing all necessary observed and model data
        - threeDim = boolean flag, 3D or not
    Outputs:
       - elev_suite = dictionary of useful statistics for sea elevation
       - speed_suite = dictionary of useful statistics for flow speed
       - dir_suite = dictionary of useful statistics for flow direction
       - u_suite = dictionary of useful statistics for u velocity component
       - v_suite = dictionary of useful statistics for v velocity component
       - vel_suite = dictionary of useful statistics for signed flow velocity
       - csp_suite = dictionary of useful statistics for cubic flow speed
    Options:
       - depth = interpolation depth from surface, float
       - plot = boolean flag for plotting results
       - save_csv = boolean flag for saving statistical benchmarks in csv file
    """
    if debug: print "CompareUV..."
    # take data from input dictionary
    mod_time = data['mod_time']
    if not data['type'] == 'Drifter':
        obs_time = data['obs_time']

        mod_el = data['mod_timeseries']['elev']
        obs_el = data['obs_timeseries']['elev']
    else:
        obs_time = data['mod_time']

    # Save path & create folder
    name = data['name']
    save_path = name.split('/')[-1].split('.')[0] + '/'
    while exists(save_path):
        save_path = save_path[:-1] + '_bis/'
    mkdir(save_path)

    # Check if 3D simulation
    if threeDim:
        obs_u_all = data['obs_timeseries']['u']
        obs_v_all = data['obs_timeseries']['v']
        mod_u_all = data['mod_timeseries']['u']
        mod_v_all = data['mod_timeseries']['v']
        bins = data['obs_timeseries']['bins']
        siglay = data['mod_timeseries']['siglay']
        # use depth interpolation to get a single timeseries
        mod_depth = mod_el + np.mean(obs_el[~np.isnan(obs_el)])
        (mod_u, obs_u) = depthFromSurf(mod_u_all,
                                       mod_depth,
                                       siglay,
                                       obs_u_all,
                                       obs_el,
                                       bins,
                                       depth=depth,
                                       debug=debug,
                                       debug_plot=debug_plot)
        (mod_v, obs_v) = depthFromSurf(mod_v_all,
                                       mod_depth,
                                       siglay,
                                       obs_v_all,
                                       obs_el,
                                       bins,
                                       depth=depth,
                                       debug=debug,
                                       debug_plot=debug_plot)
    else:
        if not data['type'] == 'Drifter':
            obs_u = data['obs_timeseries']['ua']
            obs_v = data['obs_timeseries']['va']
            mod_u = data['mod_timeseries']['ua']
            mod_v = data['mod_timeseries']['va']
        else:
            obs_u = data['obs_timeseries']['u']
            obs_v = data['obs_timeseries']['v']
            mod_u = data['mod_timeseries']['u']
            mod_v = data['mod_timeseries']['v']

    if debug: print "...convert times to datetime..."
    mod_dt, obs_dt = [], []
    for i in mod_time:
        mod_dt.append(dn2dt(i))
    for j in obs_time:
        obs_dt.append(dn2dt(j))

    if debug: print "...put data into a useful format..."
    mod_spd = np.sqrt(mod_u**2.0 + mod_v**2.0)
    obs_spd = np.sqrt(obs_u**2.0 + obs_v**2.0)
    mod_dir = np.arctan2(mod_v, mod_u) * 180.0 / np.pi
    obs_dir = np.arctan2(obs_v, obs_u) * 180.0 / np.pi
    if not data['type'] == 'Drifter':
        obs_el = obs_el - np.mean(obs_el[~np.isnan(obs_el)])
    # Choose the component with the biggest variance as the sign reference
    if np.var(mod_v) > np.var(mod_u):
        mod_signed = np.sign(mod_v)
        obs_signed = np.sign(obs_v)
    else:
        mod_signed = np.sign(mod_u)
        obs_signed = np.sign(obs_u)

    if debug:
        print "...check if the modeled data lines up with the observed data..."
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):
        raise PyseidonError("---time periods do not match up---")

    else:
        if debug:
            print "...interpolate the data onto a common time step for each data type..."
        if not data['type'] == 'Drifter':
            # elevation
            (mod_el_int, obs_el_int, step_el_int,
             start_el_int) = smooth(mod_el,
                                    mod_dt,
                                    obs_el,
                                    obs_dt,
                                    debug=debug,
                                    debug_plot=debug_plot)
            # speed
            (mod_sp_int, obs_sp_int, step_sp_int,
             start_sp_int) = smooth(mod_spd,
                                    mod_dt,
                                    obs_spd,
                                    obs_dt,
                                    debug=debug,
                                    debug_plot=debug_plot)
            # direction
            (mod_dr_int, obs_dr_int, step_dr_int,
             start_dr_int) = smooth(mod_dir,
                                    mod_dt,
                                    obs_dir,
                                    obs_dt,
                                    debug=debug,
                                    debug_plot=debug_plot)
            # u velocity
            (mod_u_int, obs_u_int, step_u_int,
             start_u_int) = smooth(mod_u,
                                   mod_dt,
                                   obs_u,
                                   obs_dt,
                                   debug=debug,
                                   debug_plot=debug_plot)
            # v velocity
            (mod_v_int, obs_v_int, step_v_int,
             start_v_int) = smooth(mod_v,
                                   mod_dt,
                                   obs_v,
                                   obs_dt,
                                   debug=debug,
                                   debug_plot=debug_plot)
            # velocity i.e. signed speed
            (mod_ve_int, obs_ve_int, step_ve_int,
             start_ve_int) = smooth(mod_spd * mod_signed,
                                    mod_dt,
                                    obs_spd * obs_signed,
                                    obs_dt,
                                    debug=debug,
                                    debug_plot=debug_plot)
            # cubic signed speed
            #mod_cspd = mod_spd**3.0
            #obs_cspd = obs_spd**3.0
            mod_cspd = mod_signed * mod_spd**3.0
            obs_cspd = obs_signed * obs_spd**3.0
            (mod_cspd_int, obs_cspd_int, step_cspd_int,
             start_cspd_int) = smooth(mod_cspd,
                                      mod_dt,
                                      obs_cspd,
                                      obs_dt,
                                      debug=debug,
                                      debug_plot=debug_plot)
        else:
            # Time steps
            step = mod_time[1] - mod_time[0]
            start = mod_time[0]

            # Already interpolated, so no need to use smooth...
            # speed
            (mod_sp_int, obs_sp_int, step_sp_int,
             start_sp_int) = (mod_spd, obs_spd, step, start)
            # direction
            (mod_dr_int, obs_dr_int, step_dr_int,
             start_dr_int) = (mod_dir, obs_dir, step, start)
            # u velocity
            (mod_u_int, obs_u_int, step_u_int, start_u_int) = (mod_u, obs_u,
                                                               step, start)
            # v velocity
            (mod_v_int, obs_v_int, step_v_int, start_v_int) = (mod_v, obs_v,
                                                               step, start)
            # velocity i.e. signed speed
            (mod_ve_int, obs_ve_int, step_ve_int,
             start_ve_int) = (mod_spd, obs_spd, step, start)
            # cubic signed speed
            #mod_cspd = mod_spd**3.0
            #obs_cspd = obs_spd**3.0
            mod_cspd = mod_signed * mod_spd**3.0
            obs_cspd = obs_signed * obs_spd**3.0
            (mod_cspd_int, obs_cspd_int, step_cspd_int,
             start_cspd_int) = (mod_cspd, obs_cspd, step, start)

    if debug: print "...remove directions where velocities are small..."
    MIN_VEL = 0.1
    indexMin = np.where(obs_sp_int < MIN_VEL)
    obs_dr_int[indexMin] = np.nan
    obs_u_int[indexMin] = np.nan
    obs_v_int[indexMin] = np.nan
    obs_ve_int[indexMin] = np.nan
    obs_cspd_int[indexMin] = np.nan

    indexMin = np.where(mod_sp_int < MIN_VEL)
    mod_dr_int[indexMin] = np.nan
    mod_u_int[indexMin] = np.nan
    mod_v_int[indexMin] = np.nan
    mod_ve_int[indexMin] = np.nan
    mod_cspd_int[indexMin] = np.nan

    if debug: print "...get stats for each tidal variable..."
    gear = data['type']  # Type of measurement gear (drifter, adcp,...)
    if not gear == 'Drifter':
        elev_suite = tidalSuite(gear,
                                mod_el_int,
                                obs_el_int,
                                step_el_int,
                                start_el_int, [], [], [], [], [], [],
                                kind='elevation',
                                plot=plot,
                                save_csv=save_csv,
                                save_path=save_path,
                                debug=debug,
                                debug_plot=debug_plot)
    else:
        elev_suite = []
    speed_suite = tidalSuite(gear,
                             mod_sp_int,
                             obs_sp_int,
                             step_sp_int,
                             start_sp_int, [], [], [], [], [], [],
                             kind='speed',
                             plot=plot,
                             save_csv=save_csv,
                             save_path=save_path,
                             debug=debug,
                             debug_plot=debug_plot)
    dir_suite = tidalSuite(gear,
                           mod_dr_int,
                           obs_dr_int,
                           step_dr_int,
                           start_dr_int,
                           mod_u,
                           obs_u,
                           mod_v,
                           obs_v,
                           mod_dt,
                           obs_dt,
                           kind='direction',
                           plot=plot,
                           save_csv=save_csv,
                           save_path=save_path,
                           debug=debug,
                           debug_plot=debug_plot)
    u_suite = tidalSuite(gear,
                         mod_u_int,
                         obs_u_int,
                         step_u_int,
                         start_u_int, [], [], [], [], [], [],
                         kind='u velocity',
                         plot=plot,
                         save_csv=save_csv,
                         save_path=save_path,
                         debug=debug,
                         debug_plot=debug_plot)
    v_suite = tidalSuite(gear,
                         mod_v_int,
                         obs_v_int,
                         step_v_int,
                         start_v_int, [], [], [], [], [], [],
                         kind='v velocity',
                         plot=plot,
                         save_csv=save_csv,
                         save_path=save_path,
                         debug=debug,
                         debug_plot=debug_plot)

    # TR: requires special treatments from here on
    vel_suite = tidalSuite(gear,
                           mod_ve_int,
                           obs_ve_int,
                           step_ve_int,
                           start_ve_int,
                           mod_u,
                           obs_u,
                           mod_v,
                           obs_v,
                           mod_dt,
                           obs_dt,
                           kind='velocity',
                           plot=plot,
                           save_csv=save_csv,
                           save_path=save_path,
                           debug=debug,
                           debug_plot=debug_plot)
    csp_suite = tidalSuite(gear,
                           mod_cspd_int,
                           obs_cspd_int,
                           step_cspd_int,
                           start_cspd_int,
                           mod_u,
                           obs_u,
                           mod_v,
                           obs_v,
                           mod_dt,
                           obs_dt,
                           kind='cubic speed',
                           plot=plot,
                           save_csv=save_csv,
                           save_path=save_path,
                           debug=debug,
                           debug_plot=debug_plot)

    # output statistics in useful format

    if debug: print "...CompareUV done."

    return (elev_suite, speed_suite, dir_suite, u_suite, v_suite, vel_suite,
            csp_suite)
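
The sign-reference trick above assigns a sign (e.g. flood vs. ebb) to the interpolated speed from whichever velocity component has the larger variance. A compact sketch with hypothetical u and v arrays (plain numpy, independent of the class above):

import numpy as np

u = np.array([0.8, 0.3, -0.5, -0.9])  # hypothetical east velocity component (m/s)
v = np.array([0.1, 0.0, -0.1, -0.2])  # hypothetical north velocity component (m/s)

spd = np.sqrt(u**2 + v**2)
sign = np.sign(v) if np.var(v) > np.var(u) else np.sign(u)
signed_spd = sign * spd        # signed speed, as passed to tidalSuite with kind='velocity'
signed_cspd = sign * spd**3    # cubic signed speed, as passed with kind='cubic speed'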
def pipeline(params, nimage, roiA, roiB, wm):

  if params.get('pipeline')=='STOCHASTIC':

          #print "Pipeline : STOCHASTIC"

          ppserversL=("localhost",)
          #ppserversL=("*",)

          data = nimage.getImage()

          ####! swap x and z for volumes coming from Slicer - do not forget to apply the inverse swap before sending them back
          data = data.swapaxes(2,0)
          print 'pipeline data type : %s' % str(data.dtype)
          ####
          shpD = data.shape
          print 'pipeline data shape : %s' % str(shpD)
          orgS = nimage.get('origin')
          org = [float(orgS[0]), float(orgS[1]), float(orgS[2])]
          print 'Origin : %s:%s:%s' % (str(org[0]), str(org[1]), str(org[2]))


          spaS = nimage.get('spacing')
          spa = [float(spaS[0]), float(spaS[1]), float(spaS[2])]
          print 'Spacing : %s:%s:%s' % (str(spa[0]), str(spa[1]), str(spa[2]))

          G = nimage.get('grads')
          b = nimage.get('bval')
          i2r = nimage.get('ijk2ras')
          i2rd = nimage.get('ijk2rasd')
          mu = nimage.get('mu')
          dims = nimage.get('dimensions')

 
          #print 'DWI : ', params.get('dwi')

          isInRoiA = False
          if params.hasKey('roiA'):
                         roiAR = numpy.fromstring(roiA.getImage(), 'uint16')
                         roiAR = roiAR.reshape(shpD[2], shpD[1], shpD[0]) # because come from Slicer - will not send them back so swap them one for all
                         roiAR = roiAR.swapaxes(2,0)
                         roiAR[roiAR>0]=1
                         roiA.setImage(roiAR)
                         isInRoiA = True
                         #print "RoiA : %s" % str(roiAR.shape)
 
                         isDir = os.access('paths0', os.F_OK)
                         if not isDir:
                            os.mkdir('paths0')


          isInRoiB = False
          if params.hasKey('roiB'):
                        if params.get('roiB') != params.get('roiA'):
                              roiBR = numpy.fromstring(roiB.getImage(), 'uint16')
                              roiBR = roiBR.reshape(shpD[2], shpD[1], shpD[0])
                              roiBR = roiBR.swapaxes(2,0)
                              roiBR[roiBR>0]=1
                              roiB.setImage(roiBR)
                              isInRoiB = True 
                              #print "RoiB : %s" % str(roiBR.shape)

                              isDir = os.access('paths1', os.F_OK)
                              if not isDir:
                                 os.mkdir('paths1')

 
          isInWM = False
          if params.hasKey('wm'):
                         wmR = numpy.fromstring(wm.getImage(), 'uint16')
                         wmR = wmR.reshape(shpD[2], shpD[1], shpD[0])
                         wmR = wmR.swapaxes(2,0)
                         wm.setImage(wmR)
                         isInWM = True 
                         #print "WM : %s" % str(wmR.shape)

          isInTensor = False

          print "Input volumes loaded!"

          # values per default
          smoothEnabled = False

          wmEnabled = True
          infWMThres = 300
          supWMThres = 900

          tensEnabled =True
          bLine = 0

          stEnabled = True
          totalTracts = 500
          maxLength = 200
          stepSize = 0.5
          stopEnabled = True
          fa = 0.0

          cmEnabled = False
          probMode = 0

          # got from client
          # special handling for bools
          if params.hasKey('smoothEnabled'):
                    smoothEnabled = bool(int(params.get('smoothEnabled')))
          if params.hasKey('wmEnabled'):
                    wmEnabled = bool(int(params.get('wmEnabled')))
          if params.hasKey('tensEnabled'):
                    tensEnabled = bool(int(params.get('tensEnabled')))
          if params.hasKey('stEnabled'):
                    stEnabled = bool(int(params.get('stEnabled')))
          if params.hasKey('cmEnabled'):
                    cmEnabled = bool(int(params.get('cmEnabled')))
          if params.hasKey('spaceEnabled'):
                    spaceEnabled = bool(int(params.get('spaceEnabled')))
          if params.hasKey('stopEnabled'):
                    stopEnabled = bool(int(params.get('stopEnabled')))
          if params.hasKey('faEnabled'):
                    faEnabled = bool(int(params.get('faEnabled')))
          if params.hasKey('traceEnabled'):
                    traceEnabled = bool(int(params.get('traceEnabled')))
          if params.hasKey('modeEnabled'):
                    modeEnabled = bool(int(params.get('modeEnabled')))

          # can handle normally
          FWHM = numpy.ones((3), 'float')
          if params.hasKey('stdDev'):
                    FWHM[0] = float(params.get('stdDev')[0])
                    FWHM[1] = float(params.get('stdDev')[1])
                    FWHM[2] = float(params.get('stdDev')[2])
                    print "FWHM: %s:%s:%s" % (str(FWHM[0]), str(FWHM[1]), str(FWHM[2]))


          if params.hasKey('infWMThres'):
                    infWMThres = int(params.get('infWMThres'))
                    print "infWMThres: %s" % str(infWMThres)
          if params.hasKey('supWMThres'):
                    supWMThres = int(params.get('supWMThres'))
                    print "supWMThres: %s" % str(supWMThres)

          if params.hasKey('bLine'):
                    bLine = int(params.get('bLine'))
                    print "bLine: %s" % str(bLine)
 
          if params.hasKey('tensMode'):
                    tensMode = params.get('tensMode')
                    print "tensMode: %s" % str(tensMode)


          if params.hasKey('totalTracts'):
                    totalTracts = int(params.get('totalTracts'))
                    print "totalTracts: %s" % str(totalTracts)
          if params.hasKey('maxLength'):
                    maxLength = int(params.get('maxLength'))
                    print "maxLength: %s" % str(maxLength)
          if params.hasKey('stepSize'):
                    stepSize = float(params.get('stepSize'))
                    print "stepSize: %s" % str(stepSize)
          if params.hasKey('fa'):
                    fa = float(params.get('fa'))
                    print "fa: %s" % str(fa)

          if params.hasKey('probMode'):
                    probMode = params.get('probMode')
                    print "probMode: %s" % str(probMode)

          if params.hasKey('lengthEnabled'):
                    lengthEnabled = params.get('lengthEnabled')
                    print "lengthEnabled: %s" % str(lengthEnabled)

          if params.hasKey('lengthClass'):
                    lengthClass = params.get('lengthClass')
                    print "lengthClass: %s" % str(lengthClass)
 


          ngrads = shpD[3] #b.shape[0]
          print "Number of gradients : %s" % str(ngrads)
          G = G.reshape(ngrads,3)
          b = b.reshape(ngrads,1)
          i2r = i2r.reshape(4,4)
          i2rd = i2rd.reshape(4,4)
          mu = mu.reshape(4,4)

          r2i = numpy.linalg.inv(i2r)
          r2id = numpy.linalg.inv(i2rd)

          mu2 = numpy.dot(r2id[:3, :3], mu[:3, :3])
          G2 = numpy.dot(G, mu2[:3, :3].T)


          vts = vects.vectors

          print "Tensor flag : %s" % str(tensEnabled)

          cm = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')
          cm2 = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')
          cm3 = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')
          cm4 = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')

          if smoothEnabled:
                    for k in range(shpD[3]):
                        timeSM0 = time.time()
                        data[...,k] = sm.smooth(data[...,k], FWHM, numpy.array([ spa[0], spa[1], spa[2] ],'float'))
                        print "Smoothing DWI volume %i in %s sec" % (k, str(time.time()-timeSM0))

          if wmEnabled:
                    wm = tensPP.EvaluateWM0(data, bLine, infWMThres, supWMThres)

                    if isInRoiA: # correcting brain mask with roi A
                       print "Correcting mask based on roiA"
                       tmpA = roiA.getImage()
                       wm[tmpA>0]=1

                    if isInRoiB: # correcting brain mask with roi A & B
                       print "Correcting mask based on roiB"
                       tmpB = roiB.getImage()
                       wm[tmpB>0]=1
          else: # avoid singularities in data
                    minVData = 10
                    wm = tensPP.EvaluateWM0(data, bLine, minVData, data[..., bLine].max())
                    wmEnabled = True # fix



          if isInWM or wmEnabled:

                    isDir = os.access('masks', os.F_OK)
                    if not isDir:
                       os.mkdir('masks')

                    tmpF = './masks/'
                    numpy.save(tmpF + 'wm.npy', wm)

                    indx = numpy.transpose(wm.nonzero())
                    print 'Total Number of voxels : ', shpD[0]*shpD[1]*shpD[2]*ngrads
                    print 'Masked voxels : ', indx.shape[0]*ngrads
                    print 'Index shape : ', indx.shape
                    
                    dataf = data.flatten()
                    cId = numpy.zeros((indx.shape[0]), 'uint32')
                    mdata = numpy.zeros((indx.shape[0], ngrads), data.dtype)
                    for i in range(indx.shape[0]):
                      cId[i] = indx[i][0]*shpD[1]*shpD[2]*shpD[3] +  indx[i][1]*shpD[2]*shpD[3]  + indx[i][2]*shpD[3]
                       
                      mdata[i,:]= dataf[cId[i]:cId[i]+ngrads]
                       
                    numpy.save(tmpF + 'index.npy', cId)
                    numpy.save(tmpF + 'mdata.npy', mdata)
                    


          if cmEnabled:
                    print "Compute tensor"
                    timeS1 = time.time()

                    roiFilterOnly = False 

                    monoP = False  

                    # multiprocessing support
                    dataBlocks = []
                    wmBlocks = []

                    nCpu = 2 # could be set to the number of available cores

                    nParts = 1
                    if shpD[2]>0 and nCpu>0 :

                      job_server = pp.Server(ppservers=ppserversL)
                       
                      ncpusL = job_server.get_ncpus()
                      print "Number of cores on local machine : %s" % str(ncpusL)
                      print "Number of active computing nodes : %s" % str(nCpu)


                      if shpD[2] >= nCpu:
                         nParts = nCpu
                      else:
                         nParts = shpD[2]

                      for i in range(nParts): 
                        datax = data[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts, :]
                        print "data block %i dimension : %s" % (i, str(datax.shape))
                        dataBlocks.append(datax)
                        if isInWM or wmEnabled:
                           wmx = wm[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts]
                           wmBlocks.append(wmx)
                    else:
                       monoP = True

                            
                    if not monoP:
                       jobs = []
                       
                    
                       job_server.set_ncpus(ncpus = ncpusL)

                       for i in range(nParts):
                          jobs.append(job_server.submit(tensPP.EvaluateTensorX1, (dataBlocks[i], G2.T, b.T, wmBlocks[i],),(tensPP.ComputeAFunctional, tensPP.ComputeTensorFunctional,), ("numpy","time",) ))


                       tBlocks = []
                       for i in range(nParts):
                          tBlocks.append(jobs[i]())


                       lV  = numpy.zeros((shpD[0], shpD[1], shpD[2], 3) , 'float')
                       EV  = numpy.zeros((shpD[0], shpD[1], shpD[2], 3, 3), 'float' )
                       xVTensor = numpy.zeros((shpD[0], shpD[1], shpD[2], 7), 'float')
                       xYTensor = numpy.zeros((shpD[0], shpD[1], shpD[2], 9), 'float')
                       xTensor0 = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'float')

                       for i in range(nParts):
                          EV[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts, ...]= tBlocks[i][0]
                          lV[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts, :]= tBlocks[i][1]
                          xVTensor[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts, ...]= tBlocks[i][2]
                          xYTensor[:, :, i*shpD[2]/nParts:(i+1)*shpD[2]/nParts, ...]= tBlocks[i][3]
                          
                       xTensor0[...]= xVTensor[..., 0]

                       lVType = lV.dtype
                       EVType = EV.dtype
                       xVTensorType = xVTensor.dtype
                       wmType = wm.dtype

                       # computation of alpha, beta, logmu0 and principal eigenvector
                       tdata = numpy.zeros((indx.shape[0], 6), 'float')

                       for i in range(indx.shape[0]):
                         cId[i] = indx[i][0]*shpD[1]*shpD[2]*shpD[3] +  indx[i][1]*shpD[2]*shpD[3]  + indx[i][2]*shpD[3]
                       
                         l = lV[indx[i][0], indx[i][1], indx[i][2], :]
                         index = numpy.argsort(abs(l))[::-1] 
                         l =l[index,:]
      
                         # Set point estimates in the Constrained model
                         E = EV[indx[i][0], indx[i][1], indx[i][2], ...]
                         alpha = (l[1]+l[2])/2
                         beta = l[0] - alpha
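                         # alpha: mean of the two minor eigenvalues; beta: how much the principal eigenvalue exceeds alpha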

                         logmu0 = xTensor0[indx[i][0], indx[i][1], indx[i][2]]
                         e = E[:, index[0]]

                         tdata[i,0]= alpha
                         tdata[i,1]= beta
                         tdata[i,2]= logmu0
                         tdata[i,3:]= e
                       
                       numpy.save(tmpF + 'tdata.npy', tdata)



                       isDir = os.access('tensors', os.F_OK)
                       if not isDir:
                          os.mkdir('tensors')

                       tmpF = './tensors/'
                       numpy.save(tmpF + 'eigenv.npy', EV)
                       numpy.save(tmpF + 'lambda.npy', lV)
                       numpy.save(tmpF + 'tensor.npy', xVTensor)
                       numpy.save(tmpF + 'tensor0.npy', xTensor0)
                       numpy.save(tmpF + 'vectors.npy', vts.T)

                       dateT = str(int(round(time.time())))
    
                       isDir = os.access('outputs', os.F_OK)
                       if not isDir:
                         os.mkdir('outputs')

                       tmpF = './outputs/'


                       i2r.tofile(tmpF + 'trafo_' + dateT + '.ijk')

                       if smoothEnabled:
                         ga = data[..., bLine]
                         ga = ga.swapaxes(2,0)
                         tmp= 'smooth_' + dateT
                         ga.tofile(tmpF + tmp + '.data')
                         createParams(ga, tmpF + tmp)

                       if wmEnabled:
                         wm = wm.swapaxes(2,0)
                         tmp= 'brain_' + dateT
                         wm.tofile(tmpF + tmp + '.data')
                         createParams(wm, tmpF + tmp)


                       if cmEnabled:
                         xVTensor = xVTensor.swapaxes(2,0)
                         xVTensor = xVTensor.astype('float32') # slicerd do not support double type yet
                         xYTensor = xYTensor.swapaxes(2,0)
                         xYTensor = xYTensor.astype('float32') # slicerd do not support double type yet
                         tmp= 'tensor_' + dateT
                         xYTensor.tofile(tmpF + tmp + '.data')
                         createParams(xYTensor, tmpF + tmp, True)
                       

                       if faEnabled:
                          faMap = tensPP.CalculateFA0(lV)
                          faMap = faMap.swapaxes(2,0)
                          tmp= 'fa_' + dateT
                          faMap.tofile(tmpF + tmp + '.data')
                          createParams(faMap, tmpF + tmp)

                       if traceEnabled:
                          trMap = tensPP.CalculateTrace0(lV)
                          trMap = trMap.swapaxes(2,0)
                          tmp= 'trace_' + dateT
                          trMap.tofile(tmpF + tmp + '.data')
                          createParams(trMap, tmpF + tmp)

                       if modeEnabled:
                          moMap = tensPP.CalculateMode0(lV)
                          moMap = moMap.swapaxes(2,0)
                          tmp= 'mode_' + dateT
                          moMap.tofile(tmpF + tmp + '.data')
                          createParams(moMap, tmpF + tmp)
                       

                       del tBlocks
                       del dataBlocks
                       del wmBlocks
                       del lV
                       del EV  
                       del xVTensor
                       del xYTensor
 
                    else:
                       pass

                    print "Compute tensor in %s sec" % str(time.time()-timeS1)

                    
                    print "Track fibers"
                    if not stopEnabled:
                        fa = 0.0

                    if isInRoiA:
                        # ROI A
                        print "Search ROI A"
                        roiP = cmpV.march0InVolume(roiA.getImage())

                        shpR = roiP.shape
                        print "ROI A dimension : %s" % str(shpR)
          
                         
                        blocksize = totalTracts
                        IJKstartpoints = []

                        monoP = False  


                        nParts = 1
                        if shpR[0]>0 and nCpu>0 :
                           if shpR[0] >= nCpu:
                               nParts = nCpu
                           else:
                               nParts = shpR[0]
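                           # split the ROI start points into nParts slices, one tracking job per slice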

                           for i in range(nParts): 
                              roiPx = roiP[i*shpR[0]/nParts:(i+1)*shpR[0]/nParts, :] 
                              print "ROI A %i dimension : %s" % (i, str(roiPx.shape))
                              IJKstartpoints.append(numpy.tile(roiPx,( blocksize, 1)))
                        else:
                           IJKstartpoints.append(numpy.tile(roiP,( blocksize, 1)))
                           monoP = True

                        timeS2 = time.time()

                        # multiprocessing
                        print "Data type : %s" % str(data.dtype)
                       
                        if not monoP:
                           jobs = []


                           for i in range(nParts):
                              jobs.append(job_server.submit(trackPP.TrackFiberYFM40, (i, 0, params.get('location'), nimage.get('fullname'), nimage.get('type'), shpD, b.T, G2.T, IJKstartpoints[i].T, r2i, i2r, r2id, i2rd, spa,\
                                     'vectors.npy', vects.vectors.dtype, vects.vectors.T.shape, 'lambda.npy', lVType, 'eigenv.npy', EVType, 'tensor0.npy', xVTensorType, 'wm.npy' ,\
                                     wmType, stepSize, maxLength, fa, spaceEnabled,),(), ("numpy","time",) ))

                           
                           res = jobs[0]()
                           for i in range(nParts-1):
                             res = jobs[i+1]()


                           print "Track fibers in %s sec" % str(time.time()-timeS2)


                           if not roiFilterOnly:
                             print "Connect tract"

                             jobs = []
                             cm = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')

                             for j in range(nParts):
                               print "Number of paths : %s" % str(IJKstartpoints[j].shape[0])
                               pathsIJKId = './paths0/' + 'unit_' + str(j) + '_IJK.npy' 
                               pathsLENId = './paths0/' + 'unit_' + str(j) + '_LEN.npy' 
                               if probMode=='binary':
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM0, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                  (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                       ("numpy","time",) )) 
                               elif probMode=='cumulative':
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM1, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                  (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                       ("numpy","time",) ))
                               else:
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM2, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                  (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                       ("numpy","time",) ))


                             cm = jobs[0]()
                             for j in range(nParts-1):
                               cm += jobs[j+1]()


                        else:
                           pass

                    if isInRoiB:
                        # ROI B
                        print "Search ROI B"
                        roiP2 = cmpV.march0InVolume(roiB.getImage())

                        shpR2 = roiP2.shape
                        print "ROI B dimension : %s" % str(shpR2)
          

                        blocksize = totalTracts
                        IJKstartpoints2 = []

                        monoP = False  

                        nParts2 = 1
                        if shpR2[0]>0 and nCpu>0 :
                           if shpR2[0] >= nCpu:
                               nParts2 = nCpu
                           else:
                               nParts2 = shpR2[0]

                           for i in range(nParts2): 
                              roiPx = roiP2[i*shpR2[0]/nParts2:(i+1)*shpR2[0]/nParts2, :] 
                              print "ROI B %i dimension : %s" % (i, str(roiPx.shape))    
                              IJKstartpoints2.append(numpy.tile(roiPx,( blocksize, 1)))
                        else:
                           IJKstartpoints2.append(numpy.tile(roiP2,( blocksize, 1)))
                           monoP = True

                        timeS3 = time.time()

                        # multiprocessing
                        print "Data type : %s" % str(data.dtype)
                       
                        if not monoP:
                           jobs = []


                           for i in range(nParts2):
                              jobs.append(job_server.submit(trackPP.TrackFiberYFM40, (i, 1, params.get('location'), nimage.get('fullname'), nimage.get('type'), shpD, b.T, G2.T, IJKstartpoints2[i].T, r2i, i2r, r2id, i2rd, spa,\
                                     'vectors.npy', vects.vectors.dtype, vects.vectors.T.shape, 'lambda.npy', lVType, 'eigenv.npy', EVType, 'tensor0.npy', xVTensorType, 'wm.npy' ,\
                                     wmType, stepSize, maxLength, fa, spaceEnabled,),(), ("numpy","time",) ))



                           res = jobs[0]()
                           for i in range(nParts2-1):
                              res = jobs[i+1]()

                           print "Track fibers in %s sec" % str(time.time()-timeS3)

                           if not roiFilterOnly:
                             print "Connect tract"

                             jobs = []
                             cm2 =  numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')

                             for j in range(nParts2):
                               print "Number of paths : %s" % str(IJKstartpoints2[j].shape[0])
                               pathsIJKId = './paths1/' + 'unit_' + str(j) + '_IJK.npy' 
                               pathsLENId = './paths1/' + 'unit_' + str(j) + '_LEN.npy' 
                               if probMode=='binary':
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM0, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                         (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                               ("numpy","time",) ))
                               elif probMode=='cumulative':
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM1, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                         (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                               ("numpy","time",) ))
                               else:
                                 jobs.append(job_server.submit(trackPP.ConnectFibersPZM2, ( params.get('location'), pathsIJKId, pathsLENId, shpD, lengthEnabled),\
                                                         (trackPP.ComputeConnectFibersFunctionalP0, trackPP.ComputeConnectFibersFunctionalP1, trackPP.ComputeConnectFibersFunctionalP2,),\
                                                               ("numpy","time",) ))

                             cm2 = jobs[0]()
                             for j in range(nParts2-1):
                               cm2 += jobs[j+1]()


                        else:
                           pass

                    if isInRoiA and isInRoiB:

                        if not monoP:
                          vicinity= 1
                          threshold = 0.1
                          minLength = 4

                          print "Try out connecting"
                          jobs = []
                          cm3 = numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')

                          counter1 = 0
                          counter2 = 0
                          counterA1 = 0
                          counterB1 = 0
                          counterA2 = 0
                          counterB2 = 0

                          Pr = 0.0
                          Fa = 0.0
                          Wa = 0.0

                          for i in range(nParts):
                             print "Number of paths : %s" % str(IJKstartpoints[i].shape[0])
                             #pathsRASId = './paths0/' + 'unit_' + str(i) + '_RAS.npy'
                             pathsIJKId = './paths0/' + 'unit_' + str(i) + '_IJK.npy'
                             #pathsANISId = './paths0/' + 'unit_' + str(i) + '_ANIS.npy'
                             pathsLOGPId = './paths0/' + 'unit_' + str(i) + '_LOGP.npy'
                             pathsLENId = './paths0/' + 'unit_' + str(i) + '_LEN.npy'

                             jobs.append(job_server.submit(trackPP.FilterFibersZM0, (params.get('location'), pathsIJKId, pathsLOGPId, pathsLENId, roiA.getImage(), roiB.getImage(), shpD,\
                                            counter1, counter2, counterA1, counterB1, counterA2, counterB2, Pr, threshold, vicinity), (), ("numpy","time",) ))


                          cm3 = jobs[0]()
                          for i in range(nParts-1):
                            cm3 += jobs[i+1]()

                          print "Filtering of fibers done from region A to region B"
                          if counter1>0:
                            print "Number of curves connecting : %s" % str(counter1)
                            print "Mean probability : %s" % str(Pr/float(counter1))

                          jobs = []
                          cm4 =  numpy.zeros((shpD[0], shpD[1], shpD[2]), 'uint32')


                          counter1 = 0
                          counter2 = 0
                          counterA1 = 0
                          counterB1 = 0
                          counterA2 = 0
                          counterB2 = 0

                          Pr = 0.0
                          Fa = 0.0
                          Wa = 0.0

                          for i in range(nParts2):
                             print "Number of paths : ", str(IJKstartpoints2[i].shape[0])
                             #pathsRASId = './paths1/' + 'unit_' + str(i) + '_RAS.npy'
                             pathsIJKId = './paths1/' + 'unit_' + str(i) + '_IJK.npy'
                             #pathsANISId = './paths1/' + 'unit_' + str(i) + '_ANIS.npy'
                             pathsLOGPId = './paths1/' + 'unit_' + str(i) + '_LOGP.npy'
                             pathsLENId = './paths1/' + 'unit_' + str(i) + '_LEN.npy' 

                             jobs.append(job_server.submit(trackPP.FilterFibersZM0, (params.get('location'), pathsIJKId, pathsLOGPId, pathsLENId, roiB.getImage(), roiA.getImage(), shpD,\
                                            counter1, counter2, counterA1, counterB1, counterA2, counterB2, Pr, threshold, vicinity), (), ("numpy","time",) ))

                          cm4 = jobs[0]()
                          for i in range(nParts2-1):
                            cm4 += jobs[i+1]()


                          print "Filtering of fibers done from region B to region A"
                          if counter1>0:
                            print "Number of curves connecting : %s" % str(counter1)
                            print "Mean probability : %s" %  str(Pr/float(counter1))

                        else:
                          pass
                  

          else:
                     print "No tractography to execute!"



          if cmEnabled:

                     if isInRoiA and not roiFilterOnly:
                          if not (cm == 0).all():
                            cm = cm.swapaxes(2,0)
                            tmp= 'cmA_' + dateT
                            cm.tofile(tmpF + tmp + '.data')
                            createParams(cm,  tmpF + tmp)

                     if isInRoiB and not roiFilterOnly:
                          if not (cm2 == 0).all():
                            cm2 = cm2.swapaxes(2,0)
                            tmp= 'cmB_' + dateT
                            cm2.tofile(tmpF + tmp + '.data')
                            createParams(cm2,  tmpF + tmp)


                     if isInRoiA and isInRoiB and not roiFilterOnly:
                          cm1a2 = cm[...]*cm2[...]/2.0
                          cm1a2 = cm1a2.astype('uint32')
                          if not (cm1a2 == 0).all():
                            tmp= 'cmAandB_' + dateT
                            cm1a2.tofile(tmpF + tmp + '.data')
                            createParams(cm1a2,  tmpF + tmp)


                            tmp= 'cmFAandB_' + dateT
                            cm1a2f = cm1a2/float(cm1a2.max())
                            cm1a2f = cm1a2f.astype('float32')
                            cm1a2f.tofile(tmpF + tmp + '.data')
                            createParams(cm1a2f,  tmpF + tmp)

                                       
                          cm1o2 = (cm[...]+cm2[...])/2.0
                          cm1o2 = cm1o2.astype('uint32')
                          if not (cm1o2 == 0).all():
                            tmp= 'cmAorB_' + dateT
                            cm1o2.tofile(tmpF + tmp + '.data')
                            createParams(cm1o2,  tmpF + tmp)

                            tmp= 'cmFAorB_' + dateT
                            cm1o2f = cm1o2/float(cm1o2.max())
                            cm1o2f = cm1o2f.astype('float32')
                            cm1o2f.tofile(tmpF + tmp + '.data')
                            createParams(cm1o2f,  tmpF + tmp)

                     if isInRoiA and isInRoiB: 
                          if not (cm3 == 0).all():
                            tmp= 'cmA2B_' + dateT
                            cm3 = cm3.swapaxes(2,0)
                            cm3.tofile(tmpF + tmp + '.data')
                            createParams(cm3,  tmpF + tmp)

                            tmp= 'cmFA2B_' + dateT
                            cm3f = cm3/float(cm3.max())
                            cm3f = cm3f.astype('float32')
                            cm3f.tofile(tmpF + tmp + '.data')
                            createParams(cm3f,  tmpF + tmp)

                          if not (cm4 == 0).all():
                            tmp= 'cmB2A_' + dateT
                            cm4 = cm4.swapaxes(2,0)
                            cm4.tofile(tmpF + tmp + '.data')
                            createParams(cm4,  tmpF + tmp)

                            tmp= 'cmFB2A_' + dateT
                            cm4f = cm4/float(cm4.max()) 
                            cm4f = cm4f.astype('float32')
                            cm4f.tofile(tmpF + tmp + '.data')
                            createParams(cm4f,  tmpF + tmp)


          print "pipeline STOCHASTIC, data shape end : %s" % str(shpD)
Beispiel #53
0
def loaddata_denoise(input_dir,
                     ref_dir,
                     signal_length=5000,
                     sigmoid=5,
                     fs=320):
    X = []
    Y = []
    QRS = []

    datapaths = glob.glob(os.path.join(input_dir, '*.mat'))
    datapaths.sort()
    for data_path in datapaths:

        # print(data_path)

        # load data
        data_mat = sio.loadmat(data_path)
        data = data_mat['ecg']
        data = np.array(data, dtype=np.float32)
        signal = data
        # plt.subplot(2,1,1)
        # plt.plot(signal[0:2000])

        # normalize the data
        # data = scale(data).astype(np.float32)
        # remove baseline wander by subtracting a heavily smoothed copy of each channel
        for i in range(signal.shape[1]):
            smoothed_signal = smooth.smooth(signal[:, i],
                                            window_len=int(fs * 0.886),
                                            window='flat')
            signal[:, i] = signal[:, i] - smoothed_signal

        # denoise ECG
        for i in range(signal.shape[1]):
            # DWT
            coeffs = pywt.wavedec(signal[:, i], 'db4', level=3)
            # compute threshold
            noiseSigma = 0.01
            threshold = noiseSigma * math.sqrt(
                2 * math.log2(signal[:, i].size))
            # apply threshold
            newcoeffs = coeffs
            for j in range(len(newcoeffs)):
                newcoeffs[j] = pywt.threshold(coeffs[j],
                                              threshold,
                                              mode='soft')

            # IDWT
            signal[:, i] = pywt.waverec(newcoeffs, 'db4')[0:len(signal)]

        # resample
        signal_new = np.zeros((signal_length, signal.shape[1]))
        for i in range(signal.shape[1]):
            signal_new[:, i] = resample(signal[:, i], signal_length)

        signal = signal_new

        # scaler = StandardScaler()
        # scaler.fit(signal)
        # signal = scaler.transform(signal).astype(np.float32)
        # plt.subplot(2,1,2)
        # plt.plot(signal[0:2000])
        # plt.show()
        X.append(signal)

    X = np.array(X, dtype=np.float32)
    print('X.shape', X.shape)

    refpaths = glob.glob(os.path.join(ref_dir, '*.mat'))
    refpaths.sort()
    for ref_path in refpaths:

        # load reference
        ref_mat = sio.loadmat(ref_path)
        R_peaks = ref_mat['R_peak']
        R_peaks = np.array(R_peaks, dtype=np.int32).squeeze()
        R_peaks = (R_peaks * (signal_length / len(data))).astype(np.int32)
        ref = smooth_ref(signal_length, R_peaks, sigmoid=sigmoid)
        ref = np.expand_dims(ref, axis=-1)
        Y.append(ref)
        QRS.append(R_peaks)

    Y = np.array(Y, dtype=np.float32)

    print('Y.shape', Y.shape)

    # remove duplicate rows
    #X,indexes = np.unique(X,  return_index=True, axis=0)
    #print(X.shape)
    # Y = Y[indexes]
    # QRS = itemgetter(*indexes)(QRS)

    # shuffle
    X, Y, QRS = shuffle(X, Y, QRS, random_state=0)

    return X, Y, QRS
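The baseline removal and wavelet soft-thresholding inside loaddata_denoise can be exercised on their own. A minimal sketch on a synthetic signal (assumes PyWavelets is installed; the 'db4' wavelet, decomposition level and noiseSigma follow the snippet, everything else is illustrative):

import math
import numpy as np
import pywt

fs = 320
t = np.arange(0, 10, 1.0 / fs)
sig = np.sin(2 * np.pi * 1.0 * t) + 0.05 * np.random.randn(t.size)

# decompose, soft-threshold every coefficient band, reconstruct
coeffs = pywt.wavedec(sig, 'db4', level=3)
noiseSigma = 0.01
threshold = noiseSigma * math.sqrt(2 * math.log2(sig.size))
denoised_coeffs = [pywt.threshold(c, threshold, mode='soft') for c in coeffs]
denoised = pywt.waverec(denoised_coeffs, 'db4')[:len(sig)]
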
Beispiel #54
0
def iter_smooth(array, loops, window_len):
    for l in range(loops):
        array = sm.smooth(array, window_len, 'flat')
    return array
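A hypothetical usage sketch for iter_smooth, assuming sm is the same smooth module used throughout these snippets (window-based smoothing with a 'flat' window); the noisy signal is synthetic and only for illustration:

import numpy as np
import smooth as sm   # assumption: the smooth module from these snippets is importable

def iter_smooth(array, loops, window_len):
    # repeatedly apply the flat-window smoother, as in the snippet above
    for _ in range(loops):
        array = sm.smooth(array, window_len, 'flat')
    return array

noisy = np.sin(np.linspace(0, 10, 500)) + 0.3 * np.random.randn(500)
cleaned = iter_smooth(noisy, loops=3, window_len=21)
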
Beispiel #55
0
 def smooth_avg_consumption(self):
     self.consumption_per_interval_smoothed = smooth(
         np.array(self.consumption_per_interval), 11)
Beispiel #56
0
         '/NN_aiSIRIS_ASSAY_MPP_SCIP_SGinf_LITH_ROCKGRP.csv')

df = pd.read_csv(fpath, low_memory=False)

values = np.log10(df.loc[df['holeid'].isin(df['holeid'].mode()), 'Scpt:0.001_SI']).values
plt.plot(values)
np.random.seed(1)

L = 1e5
Nw = 784
Ns = Nw // 2

time = np.arange(L)
values = np.random.random(time.shape)

spike = int(np.random.choice(time))

values[spike:spike+10000] += 5

values = smooth(values, int(Ns))

# values = electrocardiogram()[10000:20000]

# plt.plot(data)

X = make_nd_windows(values, Nw, steps=Ns)

tensor_x = torch.Tensor(X)  # transform to torch tensor
syn_dataset = data.TensorDataset(tensor_x) # create your datset
syn_dataloader = data.DataLoader(syn_dataset, batch_size=1) # create your dataloader
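make_nd_windows is not defined in this snippet; purely as an assumption about its behaviour, a stand-in that splits a 1-D signal into overlapping windows of length Nw with step Ns could look like:

import numpy as np

def make_windows(values, window, step):
    # hypothetical helper: stack overlapping windows of `window` samples, `step` apart
    starts = range(0, len(values) - window + 1, step)
    return np.stack([values[s:s + window] for s in starts])

sig = np.random.random(10000)
X = make_windows(sig, window=784, step=392)   # X.shape == (n_windows, 784)
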
Beispiel #57
0
plt.figure()
plt.plot(V, I)

plt.figure()
plt.plot(chR, chL)

#%% Opamp

file = 'inversor_div-11-3.txt'  # inverting circuit with an 11/3 voltage divider on the readout
completa = os.path.join('Measurements', file)
datos = np.loadtxt(completa)
datos = datos[15000:20000, :]
sr = 42100

chR, chL = np.split(datos, 2, axis=1)
chR = smooth(chR.flatten())
chL = smooth(chL.flatten())

chR /= max(chR)
chL /= max(chL)

fL = fft(chL)
fR = fft(chR)

N = len(fL)

frecs = np.linspace(0.0, 1.0 / (2.0 / sr), N // 2)

freqR = frecs[np.argmax(2.0 / N * np.abs(fR[0:N // 2]))]
freqL = frecs[np.argmax(2.0 / N * np.abs(fL[0:N // 2]))]
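The dominant-frequency extraction at the end of this snippet can also be written with NumPy's real-FFT helpers; a minimal sketch on a synthetic tone (the sample rate matches the snippet, the signal is illustrative only):

import numpy as np

sr = 42100
t = np.arange(0, 0.2, 1.0 / sr)
tone = np.sin(2 * np.pi * 440.0 * t)          # synthetic 440 Hz tone

spectrum = np.abs(np.fft.rfft(tone))
freqs = np.fft.rfftfreq(tone.size, d=1.0 / sr)
dominant = freqs[np.argmax(spectrum)]         # ~440 Hz
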
Beispiel #58
0
def inception_phase_20_0(sm, num):

    mode = 0
    fp = open("auto_phase_11.py", "w")

#start composing MIDI
    fp.write("#Inception phase 11\n");
    fp.write("import midi\n\n\n")
    fp.write("def midi_gen():\n")
    fp.write("    hh_midi = midi.Pattern(tracks=[[midi.TimeSignatureEvent(tick=0, data=[4, 2, 24, 8]),")
    midi_setup = ["    midi.KeySignatureEvent(tick=0, data=[0, 0]),\n",
              "    midi.EndOfTrackEvent(tick=1, data=[])],\n",
              "    [midi.ControlChangeEvent(tick=0, channel=0, data=[91, 58]),\n",
              "    midi.ControlChangeEvent(tick=0, channel=0, data=[10, 69]),\n",
              "    midi.ControlChangeEvent(tick=0, channel=0, data=[0, 0]),\n",
              "    midi.ControlChangeEvent(tick=0, channel=0, data=[32, 0]),\n",
              "    midi.ProgramChangeEvent(tick=0, channel=0, data=[0]),\n\n"]

    fp.writelines(midi_setup)

#automatic composing
    fp.write("#automatic composing\n")
    
    
    tick_0 = 6400
    tick_1 = 6400
    tick_2 = 9600
    
    beat_0 = 100
    beat_1 = 100
    beat_2 = 200
    
    onset_0 = 0.1
    onset_1 = 0.1
    onset_2 = 0.25

    
    
    # mode == 0 :
    chord_1 = C3
    chord_2 = G3
    chord_3 = A2
    chord_4 = E3
    chord_5 = F3
    chord_6 = C3
    chord_7 = D3
    chord_8 = G3
    chord_8_c = C3
    chord_1_b = F3
    chord_2_b = G3
    chord_3_b = C3
    chord_4_b = C3
    chord_5_b = D3
    chord_6_b = D3
    chord_7_b = E3
    chord_8_b = E3
        
    chorus_b_1 = F_major_bass
    chorus_b_2 = G_major_bass
    chorus_b_3 = C_major_bass
    chorus_b_4 = C_major_bass
    chorus_b_5 = D_minor_bass
    chorus_b_6 = D_minor_bass
    chorus_b_7 = E_major_bass
    chorus_b_8 = E_major_bass
        
    chorus_c_1 = C_major_bass
    chorus_c_2 = G_major_bass
    chorus_c_3 = A_minor_bass
    chorus_c_4 = E_minor_bass
    chorus_c_5 = F_major_bass
    chorus_c_6 = C_major_bass
    chorus_c_7 = D_minor_bass
    chorus_c_8 = G_major_bass
    chorus_c_8_c = C_major_bass
        
    scale_1 = C_major
    scale_2 = G_major
    scale_3 = A_minor
    scale_4 = E_minor
    scale_5 = F_major
    scale_6 = C_major
    scale_7 = D_minor
    scale_8 = G_major
    scale_8_c = C_major
    scale_1_b = F_major
    scale_2_b = G_major
    scale_3_b = C_major
    scale_4_b = C_major
    scale_5_b = D_minor
    scale_6_b = D_minor
    scale_7_b = E_major
    scale_8_b = E_major
    tone = 'major'
    tick_mode = tick_0
    beat = beat_0
    onset_mode = onset_0
    


    tick = 0
    note = 72
    note_last = 72
    sm_th = 9

# AB
    while (tick < 6400):
        tick_round = tick%6400
        if tick_round < 800:
            chord = chord_1
            scale = scale_1
        elif 800 <= tick_round < 1600:
            chord = chord_2
            scale = scale_2
        elif 1600 <= tick_round < 2400:
            chord = chord_3
            scale = scale_3
        elif 2400 <= tick_round < 3200:
            chord = chord_4
            scale = scale_4
        elif 3200 <= tick_round < 4000:
            chord = chord_5
            scale = scale_5
        elif 4000 <= tick_round < 4800:
            chord = chord_6
            scale = scale_6
        elif 4800 <= tick_round < 5600:
            chord = chord_7
            scale = scale_7
        elif 5600 <= tick_round < 6400:
            chord = chord_8
            scale = scale_8
        
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(100,chord))
        note = pt.gen_next_note(scale[0],mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(0,note))
        
        onset = rand.random()
        if onset > 0.3:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        note = pt.gen_next_note(note,mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last =  note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        onset = rand.random()
        if onset > 0.4:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        tick = tick + 400
    
    
    note_last = 72
    sm_th = 9
    tick = 0
    # bridge
    while (tick < 6400):
        tick_round = tick%6400
        if tick_round < 800:
            chord = chord_1_b
            scale = scale_1_b
        elif 800 <= tick_round < 1600:
            chord = chord_2_b
            scale = scale_2_b
        elif 1600 <= tick_round < 2400:
            chord = chord_3_b
            scale = scale_3_b
        elif 2400 <= tick_round < 3200:
            chord = chord_4_b
            scale = scale_4_b
        elif 3200 <= tick_round < 4000:
            chord = chord_5_b
            scale = scale_5_b
        elif 4000 <= tick_round < 4800:
            chord = chord_6_b
            scale = scale_6_b
        elif 4800 <= tick_round < 5600:
            chord = chord_7_b
            scale = scale_7_b
        elif 5600 <= tick_round < 6400:
            chord = chord_8_b
            scale = scale_8_b
        
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(100,chord))
        note = pt.gen_next_note(scale[0],mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(0,note))
        
        onset = rand.random()
        if onset > 0.2:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        note = pt.gen_next_note(note,mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        onset = rand.random()
        if onset > 0.2:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        tick = tick + 800
    
    
    note_last = 72
    sm_th = 9
    tick = 0
    # C
    while (tick < 6400):
        tick_round = tick%6400
        if tick_round < 800:
            chord = chord_1
            scale = scale_1
        elif 800 <= tick_round < 1600:
            chord = chord_2
            scale = scale_2
        elif 1600 <= tick_round < 2400:
            chord = chord_3
            scale = scale_3
        elif 2400 <= tick_round < 3200:
            chord = chord_4
            scale = scale_4
        elif 3200 <= tick_round < 4000:
            chord = chord_5
            scale = scale_5
        elif 4000 <= tick_round < 4800:
            chord = chord_6
            scale = scale_6
        elif 4800 <= tick_round < 5200:
            chord = chord_7
            scale = scale_7
        elif 5200 <= tick_round < 5600:
            chord = chord_8
            scale = scale_8
        elif 5600 <= tick_round < 6400:
            chord = chord_8_c
            scale = scale_8_c
        
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(100,chord))
        note = pt.gen_next_note(scale[0],mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, 80]),\n".format(0,note))
        
        onset = rand.random()
        if onset > 0.1:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        note = pt.gen_next_note(note,mode)
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        
        onset = rand.random()
        if onset > 0.2:
            v = 80
            note = pt.gen_next_note(note,mode)
        else:
            v = 0
        note = smooth.smooth(note, note_last, sm, sm_th)
        note_last = note
        fp.write("    midi.NoteOnEvent(tick={}, data = [{}, {}]),\n".format(100,note,v))
        tick = tick + 400
        
#end automatic composing
    fp.write("#end automatic composing\n\n")
    
    
    fp.write("    midi.EndOfTrackEvent(tick=1, data=[])]])\n\n\n")
    fp.write("    midi.write_midifile(\"inception_phase_11_{}_{}.mid\", hh_midi)".format(tone,num))

    fp.close()
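smooth.smooth is called here with four arguments (note, note_last, sm, sm_th), unlike the signal-smoothing variants elsewhere on this page, and its implementation is not shown. Purely as an assumption, a helper with that signature might limit large melodic jumps, e.g.:

def smooth_note(note, note_last, sm, sm_th):
    # hypothetical pitch smoother: if the jump from the previous note exceeds
    # sm_th semitones, pull the new note back towards note_last by sm semitones
    if note - note_last > sm_th:
        return note - sm
    if note_last - note > sm_th:
        return note + sm
    return note

print(smooth_note(84, 72, sm=2, sm_th=9))   # 82: the octave leap is damped
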
Beispiel #59
0
#prompts user for an image path and opens the edited image in a separate window. Closes all windows on a key press.
import cv2
import numpy as np
from smooth import smooth
from rotate import rotate
from grayscale import gray

name = input('input name of image: ')
do = input(
    'What do you want to do to the image? Input R for rotate, G for grayscale or S for smooth:'
)

img = cv2.imread(name)
editimg = img.copy()

C = len(img[0])  # number of columns
R = len(img)     # number of rows

if do == 'R':
    editimg = rotate(R, C, img, editimg)
if do == 'G':
    editimg = gray(R, C, img, editimg)
if do == 'S':
    editimg = smooth(R, C, img, editimg)

cv2.imshow('image', editimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Beispiel #60
0
def find_peaks(pntr_list, annot, ups=100, offset=100, smooth_param=50):
    """
        ups = 100 # how long to include upstream of TSS
        offset = 100 # how much to discard downstream of TSS for genic TSS detection
        smooth_param = 50 # approximate width of the TSS initiation region
        pntr_list: List of genome_db pointers formatted as [[pnt_pos,pnt_neg,Label], [pnt2_pos,pnt2_neg,Label2],...]
    """
    # build the full column set up front; constructing the DataFrame inside the
    # loop would overwrite the previous pointer's columns on every iteration
    columns = {}
    for pntr in pntr_list:
        for suffix in ('_Native_value', '_Native_position',
                       '_Intragenic_value', '_Intragenic_position',
                       '_Native_valueAS', '_Native_positionAS',
                       '_Intragenic_valueAS', '_Intragenic_positionAS'):
            columns[pntr[2] + suffix] = np.zeros(annot.shape[0])
    df_peak = pd.DataFrame(columns, index=annot['name'])
    for ix in tq(range(annot.shape[0])):
        gene = annot['name'].iloc[ix]
        chname = annot['chr'].iloc[ix]
        strand = annot['strand'].iloc[ix]
        start = annot['start'].iloc[ix]
        end = annot['end'].iloc[ix]
        for pntr in pntr_list:

            if strand == '+':
                tsP = pntr[0].get_nparray(chname, start - ups, end)
                tsN = pntr[1].get_nparray(chname, start - ups, end)
                vec = sm.smooth(tsP, smooth_param)[(smooth_param -
                                                    1):-(smooth_param - 1)]
                vecAS = sm.smooth(tsN, smooth_param)[(smooth_param -
                                                      1):-(smooth_param - 1)]
            elif strand == '-':
                tsP = pntr[0].get_nparray(chname, start, end + ups)
                tsN = pntr[1].get_nparray(chname, start, end + ups)
                vec = np.flipud(
                    sm.smooth(tsN, smooth_param)[(smooth_param -
                                                  1):-(smooth_param - 1)])
                vecAS = np.flipud(
                    sm.smooth(tsP, smooth_param)[(smooth_param -
                                                  1):-(smooth_param - 1)])

            df_peak.loc[gene, pntr[2] + '_Native_value'] = np.max(
                vec[:(ups + offset)])
            df_peak.loc[gene, pntr[2] + '_Native_position'] = np.argmax(
                vec[:(ups + offset)]) - ups
            df_peak.loc[gene, pntr[2] + '_Intragenic_value'] = np.max(
                vec[(ups + offset):])
            df_peak.loc[gene, pntr[2] + '_Intragenic_position'] = np.argmax(
                vec[(ups + offset):]) + offset

            df_peak.loc[gene, pntr[2] + '_Native_valueAS'] = np.max(
                vecAS[:(ups + offset)])
            df_peak.loc[gene, pntr[2] + '_Native_positionAS'] = np.argmax(
                vecAS[:(ups + offset)]) - ups
            df_peak.loc[gene, pntr[2] + '_Intragenic_valueAS'] = np.max(
                vecAS[(ups + offset):])
            df_peak.loc[gene, pntr[2] + '_Intragenic_positionAS'] = np.argmax(
                vecAS[(ups + offset):]) + offset

    return df_peak
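A hypothetical usage sketch for find_peaks, assuming the function above (together with its sm and tq imports) is in scope. A dummy track object stands in for the genome_db pointers; only the get_nparray interface actually used by the function is assumed, and the 'TFIIB' label and two-gene annotation table are made up for illustration:

import numpy as np
import pandas as pd

class DummyTrack:
    """Stands in for a genome_db pointer: returns a per-base signal for a region."""
    def get_nparray(self, chname, start, end):
        rng = np.random.default_rng(0)
        return rng.random(end - start + 1)

annot = pd.DataFrame({
    'name':   ['geneA', 'geneB'],
    'chr':    ['chrI', 'chrI'],
    'strand': ['+', '-'],
    'start':  [1000, 5000],
    'end':    [2000, 6000],
})

pntr_list = [[DummyTrack(), DummyTrack(), 'TFIIB']]
df_peak = find_peaks(pntr_list, annot, ups=100, offset=100, smooth_param=50)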