Example no. 1
    def _read_iop_from_file(self, file_name):
        """
        Generic IOP reader that interpolates the iop to the common wavelengths defined in the constructor

        returns: interpolated iop
        """
        lg.info('Reading :: ' + file_name + ' :: and interpolating to ' + str(self.wavelengths))

        if os.path.isfile(file_name):
            iop_reader = csv.reader(open(file_name), delimiter=',', quotechar='"')
            wave = scipy.float32(iop_reader.next())
            iop = scipy.zeros_like(wave)
            for row in iop_reader:
                iop = scipy.vstack((iop, row))

            iop = scipy.float32(iop[1:, :])  # drop the first row of zeros
        else:
            lg.exception('Problem reading file :: ' + file_name)
            raise IOError

        try:
            int_iop = scipy.zeros((iop.shape[0], self.wavelengths.shape[1]))
            for i_iter in range(0, iop.shape[0]):
                # r = scipy.interp(self.wavelengths[0, :], wave, iop[i_iter, :])
                int_iop[i_iter, :] = scipy.interp(self.wavelengths, wave, iop[i_iter, :])
            return int_iop
        except IOError:
            lg.exception('Error interpolating IOP to common wavelength')
            return -1
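A minimal modern sketch of the same read-and-interpolate pattern, assuming NumPy (the scipy.float32/scipy.interp aliases used above were removed from SciPy), a plain numeric CSV, and a 1-D wavelengths array; the function name is hypothetical:

import numpy as np

def read_iop_interpolated(file_name, wavelengths):
    # First CSV row: source wavelength grid; remaining rows: IOP spectra.
    table = np.loadtxt(file_name, delimiter=',', dtype=np.float32)
    wave, iops = table[0], np.atleast_2d(table[1:])
    # Interpolate every spectrum onto the common wavelength grid.
    return np.stack([np.interp(wavelengths, wave, row) for row in iops])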
Example no. 2
def prob5():
	rate, sig = wavfile.read('tada.wav')
	sig = sp.float32(sig)
	noise = sp.float32(sp.random.randint(-32767,32767,sig.shape))
	out = sp.ifft(sp.fft(sig)*sp.fft(noise))
	out = sp.real(out)
	out = sp.int16(out/sp.absolute(out).max() * 32767)
	wavfile.write('white-conv.wav',rate,out)
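prob5 leans on the convolution theorem: multiplying two DFTs and inverting gives circular convolution in the time domain. A tiny NumPy check:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
h = np.array([1.0, 0.0, 0.0, 1.0])
circ = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
print(circ)  # [3. 5. 7. 5.]; x convolved with h, wrapping around modulo 4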
Example no. 3
def makeIR(wav_in,wav_out,fs,duration,noise=0.025):
    """ measures the response of a speaker (+amp+mic) and build an IR """
    # step 1: full duplex playback and recording. Input: provided sweep wav file
    # output: recorded time response
    ecasound_cmd="ecasound -f:16,1,%i -a:1 -i jack,system,capture " + \
    " -o /tmp/capture.wav -a:2 -i %s -o jack,system -t %i"
    ecasound_cmd=ecasound_cmd%(int(fs),wav_in,int(duration))
    # run capture    
    os.system(ecasound_cmd)
    # load input and capture wave files 
    time.sleep(3)
    f=wave.open(wav_in,'rb')
    len1=f.getnframes()
    #nc1=f.getnchannels()
    #bp1=f.getsampwidth()
    data=f.readframes(len1)
    f.close()
    Y1=scipy.float32(scipy.fromstring(data,dtype='int16'))
    f=wave.open('/tmp/capture.wav','rb')
    len2=f.getnframes()
    #nc1=f.getnchannels()
    #bp1=f.getsampwidth()
    data=f.readframes(len2)
    f.close()    
    Y2=scipy.float32(scipy.fromstring(data,dtype='int16'))
    # truncate and normalize wave file 
    #(or we could pad the shortest to the longest... TODO!)
    minlen = min([len1,len2])
    Y2=Y2[0:minlen]
    Y2=Y2/max(abs(Y2))
    Y1=Y1[0:minlen]
    Y1=Y1/max(abs(Y1))
    # compute frequency response function as ratio of both spectra
    FRF=scipy.fft(Y2)/scipy.fft(Y1)
    # compute impulse response as inverse FFT of FRF
    IRraw=scipy.real(scipy.ifft(FRF))
    # get rid of initial lag in IR
    thr=max(abs(IRraw))*noise
    offset=max([0 , min(min(scipy.where(abs(IRraw)>thr)))-5 ])
    IR=IRraw[offset:-1] 
    IRnorm=IR/max(abs(IR))
    # TODO: add post pro options such as low/high pass and decay
    # write output IR
    f = wave.open(wav_out, 'w')
    f.setparams((1, 2, fs, 0, 'NONE', 'not compressed'))
    maxVol=2**15-1.0 #maximum amplitude
    wvData=""
    for i in range(len(IRnorm)):
        wvData+=pack('h', maxVol*IRnorm[i])
    f.writeframes(wvData)
    f.close()
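The core of makeIR is a deconvolution; a self-contained sketch with NumPy (the scipy.fft/scipy.ifft function aliases are gone from modern SciPy), assuming two equal-length 1-D float arrays; the function name is hypothetical:

import numpy as np

def impulse_response(y_sweep, y_capture, noise=0.025):
    # Frequency response function: ratio of captured to played spectra.
    frf = np.fft.fft(y_capture) / np.fft.fft(y_sweep)
    ir = np.real(np.fft.ifft(frf))
    # Trim the initial lag: first sample above the noise threshold, minus a small guard.
    thr = np.abs(ir).max() * noise
    offset = max(0, int(np.flatnonzero(np.abs(ir) > thr)[0]) - 5)
    ir = ir[offset:]
    return ir / np.abs(ir).max()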
Example no. 4
 def calc_gaussian_smooth(self):
     """ apply gaussian """
     xy = sp.float32(self.OptionsWindow.Options['filter_xy'])
     z = sp.float32(self.OptionsWindow.Options['filter_z'])
     filter_size = (xy,xy,z)
     
     for n in range(self.nFiles):
         self.Main.MainWindow.statusBar().showMessage("calculating gaussian smooth on Dataset " + str(n))
         if self.Main.Options.filter_target == 'raw':
             self.raw[:,:,:,n] = ndimage.gaussian_filter(self.data[:,:,:,n],filter_size)
         if self.Main.Options.filter_target == 'dFF':
             self.dFF[:,:,:,n] = ndimage.gaussian_filter(self.dFF[:,:,:,n],filter_size)
         pass
     self.Main.MainWindow.statusBar().clearMessage()
     pass
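filter_xy/filter_z are handed to gaussian_filter as a per-axis sigma, so smoothing can differ between the image plane and the z-axis. A standalone sketch on toy data:

import numpy as np
from scipy import ndimage

stack = np.random.rand(64, 64, 8).astype(np.float32)              # toy x-y-z volume
smoothed = ndimage.gaussian_filter(stack, sigma=(2.0, 2.0, 0.5))  # wider in-plane, narrower in z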
Example no. 5
def down_sample(filename, new_rate, outputfile=None):
    """
    Create a down-sampled copy of the provided .wav file.  Unless overridden, the output
        file will be of the form "down_<orginalname>.wav"
        
    Parameters
    ----------
    filename : string
        input .wav file
    new_rate : int
        sample rate of output file
    outputfile : string
        name of output file
    """

    if outputfile is None:
        outputfile = "down_" + filename

    old_rate, in_sig = wavfile.read(filename)
    in_sig = sp.float32(in_sig)
    fin = sp.fft(in_sig)
    nsiz = sp.floor(in_sig.size * new_rate / old_rate)
    nsizh = sp.floor(nsiz / 2)
    fout = sp.zeros(nsiz)
    fout = fout + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz - nsizh + 1 :] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.ifft(fout)
    out = sp.real(out)  # Take the real component of the signal
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write(outputfile, new_rate, out)
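A sketch of the same spectral down-sampling for current NumPy/SciPy, where the sp.fft alias is gone and float slice indices raise errors; it assumes a mono input file, and down_sample_np is a hypothetical name:

import numpy as np
from scipy.io import wavfile

def down_sample_np(filename, new_rate, outputfile=None):
    # Same spectral truncation as above, with integer indices throughout.
    if outputfile is None:
        outputfile = "down_" + filename
    old_rate, in_sig = wavfile.read(filename)
    fin = np.fft.fft(np.float32(in_sig))
    nsiz = int(in_sig.size * new_rate // old_rate)
    nsizh = nsiz // 2
    fout = np.zeros(nsiz, dtype=complex)
    fout[:nsizh] = fin[:nsizh]
    fout[nsiz - nsizh + 1:] = np.conj(np.flipud(fout[1:nsizh]))
    out = np.real(np.fft.ifft(fout))
    out = np.int16(out / np.abs(out).max() * 32767)
    wavfile.write(outputfile, new_rate, out)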
Example no. 6
def prob4(filename='saw.wav', new_rate = 11025, outfile='prob4.wav'):
    """Down-samples a given .wav file to a new rate and saves the resulting
    signal as another .wav file.
    
    Parameters
    ----------
    filename : string, optional
        The name of the .wav sound file to be down-sampled.
        Defaults to 'saw.wav'.
    new_rate : integer, optional
        The down-sampled rate. Defaults to 11025.
    outfile : string, optional
        The name of the new file. Defaults to prob4.wav.

    Returns
    -------
    None
    """
    old_rate, in_sig = wavfile.read(filename)
    fin = fftw.fft(sp.float32(in_sig))
    # Use if scipy_fftpack is unavailable
    # fin = sp.fft(sp.float32(in_sig))
    nsiz = sp.floor(in_sig.size * new_rate / old_rate)
    nsizh = sp.floor(nsiz / 2)
    fout = sp.zeros(nsiz) + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz-nsizh+1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    out = sp.real(sp.ifft(fout))
    out = sp.int16(out/sp.absolute(out).max() * 32767)
    plot_signal(filename)
    wavfile.write(outfile, new_rate, out)
    print ""; plot_signal(outfile)
Example no. 7
def prob3(filename='pianoclip.wav'):
    """Plots the spectrum of a given .wav file, then calculates the location
    and value of the largest spike. For the default value, the exact value is
    742.281519994 Hz (f#5 + 5 cents)
    
    Parameters
    ----------
    filename: string, optional
        The name of the .wav sound file to be examined.
        Defaults to 'pianoclip.wav'.

    Returns
    -------
    None
    """
    plot_signal(filename)
    rate, signal = wavfile.read(filename)
    signal = sp.float32(signal)
    fsignal = sp.absolute(fftw.fft(signal.T).T)
    # Use if scipy_fftpack is unavailable
    #fsignal = sp.absolute(sp.fft(signal, axis=0))
    plt.plot(fsignal[0:fsignal.shape[0]/2])
    plt.title("Spectrum of " + filename)
    plt.show()
    loc = fsignal[1:].argmax()
    val = fsignal[1:].max()
    print "\nSpike location:\t" + str(loc)
    print "Spike value:\t" + str(val)
    print "Hz:\t\t" + str(float(loc*rate)/signal.shape[0])
Example no. 8
def prob4():
	samplerate = 22050
	noise = sp.int16(sp.random.randint(-32767,32767,samplerate*10)) # Create 10 seconds of mono white noise
	wavfile.write('white_noise.wav',22050,noise)
	f = sp.fft(sp.float32(noise))
	plt.plot(sp.absolute(f))
	plt.show()
Example no. 9
File: TEMT.py Project: babonis/TEMT
 def posWeight(self, pos, tr_len, flag):
     
     p_FG = self.posProbFG(pos, tr_len, flag)
     p_BG = 1/float32(POS_BINS_NUM)
     weight = p_FG/p_BG
     
     return weight
Example no. 10
File: TEMT.py Project: babonis/TEMT
 def updateCountsFG(self, pos, tr_len, flag, mass = 1):
     
     tr_len_bin_i = getBinIndex(tr_len, 0)
     tr_pos_bin_i = getBinIndex(pos/float32(tr_len), 1)
     if flag == 0:
         self._counts_fg_5_end[tr_len_bin_i, tr_pos_bin_i] = self._counts_fg_5_end[tr_len_bin_i, tr_pos_bin_i] + mass
     elif flag == 1:
         self._counts_fg_3_end[tr_len_bin_i, tr_pos_bin_i] = self._counts_fg_3_end[tr_len_bin_i, tr_pos_bin_i] + mass
Example no. 11
def plot_tada_spec():
    plt.close('all')
    rate, sig = wavfile.read('tada.wav')
    sig = sp.float32(sig)
    fsig = anfft.fft(sig.T).T
    plt.figure()
    plt.plot(sp.absolute(fsig))
    plt.savefig('tadaspec.pdf')
Example no. 12
def problem3(filename):    
    rate, sig = wavfile.read(filename)
    sig = sp.float32(sig)
    fsig = fftw.fft(sig,axis=0)
    fsig = fsig[1:len(fsig)/2]
    #return sp.argmax(fsig)/2
    plt.plot(sp.absolute(fsig))
    plt.show()
Example no. 13
def plot_noise_spec():
    plt.close('all')
    rate, sig = wavfile.read('Noisysignal1.wav')
    sig = sp.float32(sig)
    fsig = anfft.fft(sig.T).T
    f = sp.absolute(fsig)
    plt.figure()
    plt.plot(f[0:f.shape[0]/2])
    plt.savefig('noisyspec.pdf')
Example no. 14
def plot_tada_spec_left():
    plt.close('all')
    rate, sig = wavfile.read('tada.wav')
    sig = sp.float32(sig)
    fsig = anfft.fft(sig.T).T
    f = sp.absolute(fsig)
    plt.figure()
    plt.plot(f[0:f.shape[0]/2,:])
    plt.savefig('tadaspec2.pdf')
Example no. 15
File: TEMT.py Project: babonis/TEMT
 def posProbFG(self, pos, tr_len, flag): #linear space, pos: position index within transcript
     
     tr_len_bin_i = getBinIndex(tr_len, 0)
     tr_pos_bin_i = getBinIndex(pos/float32(tr_len), 1)
     if flag == 0:
         p = self._counts_fg_5_end[tr_len_bin_i, tr_pos_bin_i]/numpy.sum(self._counts_fg_5_end[tr_len_bin_i, :])
     elif flag == 1:
         p = self._counts_fg_3_end[tr_len_bin_i, tr_pos_bin_i]/numpy.sum(self._counts_fg_3_end[tr_len_bin_i, :])
         
     return p
Example no. 16
def plot_sine_spec():
    plt.close('all')
    samplerate = 44100 # 44100 samples per second
    freq = 1760 # We’re going to produce a 1760 Hz sine wave ...
    length = 2 # ... which will last for 2 seconds.
    stepsize = freq*2*sp.pi/samplerate
    sig = sp.sin(sp.arange(0,stepsize*length*samplerate ,stepsize))     
    sig = sp.float32(sig)
    fsig = anfft.fft(sig)
    plt.plot(sp.absolute(fsig))
    plt.savefig('sinespec.pdf')
Example no. 17
	def differentiate( self ):
		Ntot = (self.Nx - 1)*(self.Ny - 1)

		tpb = 512
		if ( Ntot%tpb == 0 ): bpg = Ntot/tpb
		else: bpg = Ntot/tpb + 1

		Db = ( tpb, 1, 1 )
		Dg = ( bpg, 1 )

		self.diff( sc.int32(self.Nx), sc.int32(self.Ny), sc.float32(self.dx), self.dev_A, self.dev_dA, block=Db, grid=Dg )
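The if/else around bpg is integer ceiling division, ensuring one extra block whenever Ntot is not a multiple of tpb; an equivalent sketch (hypothetical helper name):

def launch_shape(n_total, threads_per_block=512):
    # Ceiling division folds the if/else into one expression: one extra
    # block whenever n_total is not a multiple of threads_per_block.
    blocks_per_grid = (n_total + threads_per_block - 1) // threads_per_block
    return (threads_per_block, 1, 1), (blocks_per_grid, 1)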
Example no. 18
def white_noise(outfile='prob4.wav'):
    """Generate some white noise, write it to the specified outfile,
    and plot the spectrum (DFT) of the signal.
    """
    samplerate = 22050
    # Create 10 seconds of mono white noise
    noise = sp.int16(sp.random.randint(-32767,32767,samplerate*10)) 
    wavfile.write(outfile,22050,noise)
    f = sp.fft(sp.float32(noise))
    plt.plot(sp.absolute(f))
    plt.show()
Example no. 19
def read(fname, winsize):
    if fname == "-":
        wf = wave.open(sys.stdin, 'rb')
        n = wf.getnframes()
        str = wf.readframes(n)
        params = ((wf.getnchannels(), wf.getsampwidth(),
                   wf.getframerate(), wf.getnframes(),
                   wf.getcomptype(), wf.getcompname()))
        siglen = ((int)(len(str)/2/winsize) + 1) * winsize
        signal = sp.zeros(siglen, sp.float32)
        signal[0:len(str)/2] = sp.float32(sp.fromstring(str, sp.int16))/32767.0
        return signal, params
    else:
        return read_signal(fname, winsize)
Example no. 20
def read_signal(filename, winsize):
    wf = wave.open(filename, "rb")
    n = wf.getnframes()
    str = wf.readframes(n)
    params = (
        wf.getnchannels(),
        wf.getsampwidth(),
        wf.getframerate(),
        wf.getnframes(),
        wf.getcomptype(),
        wf.getcompname(),
    )
    siglen = ((int)(len(str) / 2 / winsize) + 1) * winsize
    signal = sp.zeros(siglen, sp.float32)
    signal[0 : len(str) / 2] = sp.float32(sp.fromstring(str, sp.int16)) / 32767.0
    return [signal, params]
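Both readers depend on sp.fromstring, which modern NumPy removed; a sketch of the same zero-padded read with np.frombuffer, assuming 16-bit mono input (hypothetical function name):

import wave
import numpy as np

def read_signal_np(filename, winsize):
    with wave.open(filename, "rb") as wf:
        params = wf.getparams()
        raw = wf.readframes(wf.getnframes())
    samples = np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32767.0
    # Zero-pad so the length is a whole number of windows.
    siglen = (len(samples) // winsize + 1) * winsize
    signal = np.zeros(siglen, dtype=np.float32)
    signal[:len(samples)] = samples
    return signal, params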
Example no. 21
def plot_down_saw_spec_correct():
    plt.close('all')
    rate, in_sig = wavfile.read('saw.wav')
    old_rate = 44100
    new_rate = 22050
    in_sig = sp.float32(in_sig)
    fin = anfft.fft(in_sig)
    nsiz = sp.floor(in_sig.size*new_rate/old_rate)
    nsizh = sp.floor(nsiz/2)
    fout = sp.zeros(nsiz)
    fout = fout + 0j
    fout[0:nsizh] = fin[0:nsizh]
    fout[nsiz-nsizh+1:] = sp.conj(sp.flipud(fout[1:nsizh]))
    f = sp.absolute(fout)
    plt.plot(f[0:f.shape[0]/2])
    plt.savefig('sawdownspec.pdf')
Example no. 22
 def calc_gaussian_smooth(self):
     """ apply gaussian """
     xy,z = sp.float32(self.Main.Options.preprocessing['filter_size'])
     filter_size = (xy,xy,z)
     
     for n in range(self.Main.Data.nTrials):
         self.Main.MainWindow.statusBar().showMessage("calculating gaussian smooth on Dataset " + str(n))
         if self.Main.Options.preprocessing['filter_target'] == 'raw':
             self.Main.Data.raw[:,:,:,n] = ndimage.gaussian_filter(self.Main.Data.raw[:,:,:,n],filter_size)
         if self.Main.Options.preprocessing['filter_target'] == 'dFF':
             self.Main.Data.dFF[:,:,:,n] = ndimage.gaussian_filter(self.Main.Data.dFF[:,:,:,n],filter_size)
     self.Main.MainWindow.statusBar().clearMessage()
     
     self.Main.Signals.updateDisplaySettingsSignal.emit()
     self.Main.MainWindow.Data_Display.Traces_Visualizer.update_traces()
     self.Main.MainWindow.Data_Display.Traces_Visualizer_Stimsorted.update_traces()
     pass
Example no. 23
 def __build_loss_train__fn__(self):
     # create loss function
     prediction = layers.get_output(self.net)
     loss = objectives.categorical_crossentropy(prediction, self.__target_var__)
     loss = loss.mean() + 1e-4 * regularization.regularize_network_params(self.net, regularization.l2)
     
     val_acc = T.mean(T.eq(T.argmax(prediction, axis=1), self.__target_var__),dtype=theano.config.floatX)
     
     # create parameter update expressions
     params = layers.get_all_params(self.net, trainable=True)
     self.eta = theano.shared(sp.array(sp.float32(0.05), dtype=sp.float32))
     update_rule = updates.nesterov_momentum(loss, params, learning_rate=self.eta,
                                                 momentum=0.9)
     
     # compile training function that updates parameters and returns training loss
     self.__train_fn__ = theano.function([self.__input_var__,self.__target_var__], loss, updates=update_rule)
     self.__predict_fn__ = theano.function([self.__input_var__], layers.get_output(self.net,deterministic=True))
     self.__val_fn__ = theano.function([self.__input_var__,self.__target_var__], [loss,val_acc])
Example no. 24
File: TEMT.py Project: babonis/TEMT
def getReadsBias(inreads, filename, read_len, tr_ID, tr_num, intranscripts, bias_seq, bias_pos):
    i = 0
    read_ID_current = ''
    file_state = 0
    current_time = time.time()
    while file_state == 0:
        Y_i, read_ID_current, left_read_i, frag_end_read_i, file_state = pasRead(inreads, read_ID_current, tr_ID, tr_num)
        hit_num = len(left_read_i)
        mass_i = 1/float32(hit_num)
        bias_seq, bias_pos = updateBiasFG(left_read_i, frag_end_read_i, read_len, tr_ID, tr_num, intranscripts, mass_i, bias_seq, bias_pos)
        
        i = i + 1
        if i%10000 == 0:
            run_time = time.time() - current_time
            current_time = time.time()
            print '%s reads processed for counting reads bias of %s...' % (i, filename)
    
    inreads.seek(0)
    
    return (bias_seq, bias_pos)
Example no. 25
def loadScissors(datadir=NN_DATA_DIR, fn=nn_data_sets.NN_DATA_3D):
    '''
    Loads the 250K 3D points from the scissors point cloud data. These are simply 3D coordinates
    representing points scanned from the handle of a pair of scissors.
    '''
    with open( os.path.join(datadir,fn), "r") as f:
        tmp = f.readlines()
        
    #determine where the comments/header info in the file ends
    cur = 0
    while cur < len(tmp):
        linetxt = tmp[cur]
        if not (linetxt[0] == '#'): break
        cur += 1
    
    assert (cur < len(tmp)) #if this fails, then the whole file is header/comments
    
    dat = [ s.split('\n')[0].split() for s in tmp[cur:] ]
    M = scipy.float32(dat)
    return M
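The header-skipping loop in loadScissors duplicates what NumPy's text loader already does; a one-line sketch, assuming a whitespace-separated file with '#' comment lines:

import numpy as np

def load_scissors_np(path):
    # np.loadtxt skips '#' comment lines and parses the rest as floats.
    return np.loadtxt(path, comments='#', dtype=np.float32)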
Example no. 26
 def __init__(self,nn_name,batch_size=256,freeze=1,l_rates = sp.float32(0.05)*sp.ones(120,dtype=sp.float32),verbose = 1,subnet= None):
     self.nn_name = nn_name
     self.subnet = subnet
     if subnet != None and freeze:
         self.subnet.__freeze__()
     self.batch_size = batch_size
     self.verbose = verbose
     self.l_rates = l_rates
     self.__input_var__ = T.tensor4('X'+self.nn_name[:2])
     self.__target_var__ = T.ivector('y+'+self.nn_name[:2])
     self.max_epochs = self.l_rates.shape[0]
     if self.nn_name == '12-net':
         self.net = self.__build_12_net__()
     elif self.nn_name == '24-net':
         self.net = self.__build_24_net__()
     elif self.nn_name == '48-net':
         self.net = self.__build_48_net__()
     elif self.nn_name =='12-calib_net':
         self.net = self.__build_12_calib_net__()
     elif self.nn_name =='24-calib_net':
         self.net = self.__build_24_calib_net__()
     elif self.nn_name =='48-calib_net':
         self.net = self.__build_48_calib_net__()
     self.__build_loss_train__fn__()
Example no. 27
def loadMSER(datadir=os.path.join(NN_DATA_DIR,"ukbench_extract") ):
    '''
    From the Nister and Stewenius university of kentucky benchmark data set, you can download
    ukbench.zip, which includes 10,200 MSER files, each containing about 1000 MSER interest points
    extracted from images. Total data set size is 7,034,780 MSER points!
    @Note: Once the big matrix has been constructed by loading data from the 1000's of
    files, it's much more efficient for future work to save the big matrix to a single
    numpy file and reload in the future. See loadMSER_npy() function that will load
    the data from a numpy file named "MSER_7M.npy"
    '''
    mfiles = glob.glob( os.path.join( datadir, "mser*"))
    print "There are %d MSER files in the directory: %s."%(len(mfiles), datadir)
    if len(mfiles) < 1: return None
    
    dataMs = []
    for i,mserFile in enumerate(mfiles):
        with open(mserFile,"r") as f:
            lines = f.readlines()
        #first two lines are non-MSER fields
        lines = lines[2:]
        
        if i % 1000 == 0:
            print ""   #new line every 1000 MSER files processed
        if i % 100 == 0:
            print ". ", #show a dot every 100 MSER files processed
            sys.stdout.flush()
        
        #convert into a scipy 2D array N lines (points) by D dimensions
        X = scipy.array( [ scipy.float32(lx.split()) for lx in lines] )
        dataMs.append(X)
    
    M = scipy.vstack( dataMs ) #one giant matrix is the stack of the 10K matrices
    (rs,cs) = M.shape
    
    print "Data loaded. There are %d samples of %d-dimensional MSER points."%(rs, cs)
    return M
Example no. 28
# Simulate some uniform logits (this will be explained in a demo later)
auto_mask = sp.sparse.tril(np.ones([window_size, window_size]), k=0)
summary_mask = sp.sparse.lil_matrix((window_size, window_size))
summary_mask[:, window_size - blocksize[0]:] = 1
global_mask = sp.sparse.kron(
    sp.sparse.tril(np.ones([n_windows, n_windows]), k=-1), summary_mask)
global_mask = (global_mask +
               sp.sparse.kron(sp.sparse.eye(n_windows), auto_mask)).sign()

# Get the block sparse format
bsr = sp.sparse.bsr_matrix(global_mask, blocksize=blocksize)
bsr.eliminate_zeros()  # need to call this to eliminate blocks of all zeros

# The dense blocks
blocks = sp.float32(bsr.data)

# Dense mask for each active block
mask_data = np.array([[[1]]] * len(bsr.indices))
active_mask = sp.sparse.bsr_matrix(
    (mask_data, bsr.indices, bsr.indptr)).toarray()
# np.savetxt("active_mask.txt", active_mask, delimiter='', fmt="%i")
active_mask = sp.int64(active_mask.flatten())
print("Done creating input data")

# #### MODEL CREATION ####
builder = popart.Builder()
# Reshape the blocks to the desired format
blocks = sp.reshape(blocks, [blocks.shape[0], -1])
blocks = np.array(list(blocks), dtype=sp.float32)
logits = builder.addInitializedInputTensor(blocks)
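A small sanity check of the mask construction above, run with toy sizes (the values here are made up; the real demo presumably uses larger dims):

import numpy as np
import scipy.sparse as sparse

window_size, n_windows, blocksize = 4, 2, (2, 2)
auto_mask = sparse.tril(np.ones([window_size, window_size]), k=0)
summary_mask = sparse.lil_matrix((window_size, window_size))
summary_mask[:, window_size - blocksize[0]:] = 1
global_mask = sparse.kron(
    sparse.tril(np.ones([n_windows, n_windows]), k=-1), summary_mask)
global_mask = (global_mask + sparse.kron(sparse.eye(n_windows), auto_mask)).sign()
print(global_mask.toarray().astype(int))  # causal blocks plus per-window summary columns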
Example no. 29
#rate2,sig2 = wavfile.read('balloon.wav')
#m = sig2.shape[0]
#sig1 = sp.append(sig1,sp.zeros((m,2)),axis = 0)
#sig2 = sp.append(sig2,sp.zeros((sig1.shape[0] - m,2)),axis = 0)
#f1 = anfft.fft(sig1.T).T
#f2 = anfft.fft(sig2.T).T
#out = anfft.ifft((f1*f2).T).T
#out = sp.real(out)
#scaled = sp.int16(out/sp.absolute(out).max() * 32767)
#wavfile.write('test.wav',44100,scaled)
#==============================================================================
# PROBLEM 4
#==============================================================================
#samplerate = 22050
#noise = sp.int16(sp.random.randint(-32767,32767,samplerate*10)) # Create 10 seconds of mono white noise
#wavfile.write('white_noise.wav',22050,noise)
#f = anfft.fft(sp.float32(noise))
#plt.plot(sp.absolute(f))
#plt.show()
#==============================================================================
# PROBLEM 5
#==============================================================================
rate, sig = wavfile.read('tada.wav')
sig = sp.float32(sig)
noise = sp.float32(sp.random.randint(-32767,32767,sig.shape))
out = anfft.ifft(anfft.fft(sig.T)*anfft.fft(noise.T)).T
out = sp.real(out)
out = sp.int16(out/sp.absolute(out).max() * 32767)
wavfile.write('white-conv.wav',rate,out)

Example no. 30
            for row in csvreader:
                if (row[1] == site) and (row[2] == sensor):  #and ((float(row[12]) > 0.0) and (float(row[13]) >= 0.0)):
                    date = datetime.datetime(int(row[4]), int(row[5]), int(row[6]))
                    #d_key = str(date.strftime("%Y"))
                    d_key = toYearFraction(date)
                    d_key = round(d_key, 2)
                    if not d_key in date_dict and int(row[13]) >= 0:
                        date_dict[d_key] = 1
                    elif d_key in date_dict and int(row[13]) >= 0:
                        date_dict[d_key] = date_dict[d_key] + 1

                    count += 1
                    cloud_flag_list.append(int(row[13]))
                    #print(str(row[12]) + ' :: ' + str(row[13]))

            ax.bar(scipy.float32(date_dict.keys()), scipy.float32(date_dict.values()), bar_width, alpha=opacity, color='b')
            ax.xaxis.set_major_formatter(majorFormatter)
            #ax.vlines(scipy.float32(date_dict.keys()), scipy.float32(date_dict.values()), bar_width, alpha=opacity)
            pylab.title(sensor + ' :: ' + site)
            pylab.xlabel('Decimal Year')
            pylab.ylabel('Number of manually cloud-screened images')
            pylab.grid()

            #pylab.show()
            pylab.savefig('./Cloudscreening_images/' + sensor + '--' + site + '.png')
            #pylab.clf()
            del (date_dict)
            del (date_list)
            print('--------------------')
            print(sensor)
            print('--------------------')
Example no. 31
                         opVersion = 1,
                         domain = "ai.graphcore",
                         inputs = [logits],
                         attributes = {
                          "matrixDims": matrix_dims,
                          "blockSize": blocksize,
                          "sparsity": sparsity.tolist(),
                          "groupSizes": group_sizes.tolist(),
                          "subBlockMaskPerGroup": "[ZeroUpperTriangle, ZeroUpperTriangle, ZeroUpperTriangle, ZeroUpperTriangle]"
                         })[0]
dlogits = popart.reservedGradientPrefix() + logits  # the gradient tensor's name
upstream_grad = popart.reservedGradientPrefix() + probs  # the gradient tensor's name

# Make some blocks to regress agains just so there are gradients
expected_tokens = np.zeros_like(input_blocks) + np.eye(16).flatten()
expected_tokens = -sp.float32(np.array(list(expected_tokens)))  # negative sign for negative logprob
expected_tokens = builder.aiOnnx.constant(expected_tokens, 'expected_tokens')

pbias = builder.aiOnnx.constant(np.zeros([1, input_blocks.shape[-1]], dtype=np.float32)+1e-6, 'pbias')
biased_probs = builder.aiOnnx.add([probs, pbias])
logprobs = builder.aiOnnx.log([biased_probs])

out = builder.aiOnnx.mul([logprobs, expected_tokens])
loss = builder.aiGraphcore.l1loss([out], 1.0)

# Describe how to run the model
anchor_desc = {probs: popart.AnchorReturnType("ALL"), dlogits: popart.AnchorReturnType("ALL"), upstream_grad: popart.AnchorReturnType("ALL")}
dataFlow = popart.DataFlow(1, anchor_desc)

session = popart.TrainingSession(fnModel = builder.getModelProto(),
                                 loss = loss,
Example no. 32
#First we obtain the true value, computed by hand (TI calculator with 12-digit floats)
valor_real = [
    7.21734 * (10**-10), 3.82569 * (10**-12), 1.89061 * (10**-14),
    3.26453 * (10**-17)
]

#Then the exact values computed by the console, which is 64-bit
valor_exacto = []
for i in a:
    ve = ((i**3))
    valor_exacto.append(ve)

#Values computed at 32 bits
valores_32 = []
for i in a:
    v32 = sp.float32((i**3))
    valores_32.append(v32)

#Values computed at 64 bits
valores_64 = []
for i in a:
    v64 = sp.float64((i**3))
    valores_64.append(v64)

#Percentage errors between the true and exact values and the value returned by the program, at both 32 and 64 bits
error_32_r = []
error_64_r = []

i = 0
while i < N:
    error_32_r.append(
Example no. 33
q = [math.pi, math.e, phi]

# q2 = squares of the irrational numbers computed on a calculator
q2 = [9.86960440109, 7.38905609893, 2.61803398875]

# then we compute the squares of the same numbers using float64
# and float32, to compare the errors

float64 = []
for i in q:
    q3 = sp.float64(i**2)
    float64.append(q3)

float32 = []
for i in q:
    q4 = sp.float32(i**2)
    float32.append(q4)

#Error 64:
error64 = []
for i in range(3):
    error = ((float64[i] - q2[i]) / q2[i])
    error64.append(error)

#Error 32:
error32 = []
for i in range(3):
    error = ((float32[i] - q2[i]) / q2[i])
    error32.append(error)
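The same float32-versus-float64 comparison in a compact NumPy sketch; the relative error at 32 bits lands around 1e-7 (float32 has a ~6e-8 machine epsilon):

import math
import numpy as np

phi = (1 + math.sqrt(5)) / 2
for x in (math.pi, math.e, phi):
    exact = x * x                        # 64-bit Python float
    approx = float(np.float32(x) ** 2)   # squared after rounding to 32 bits
    print(x, (approx - exact) / exact)   # relative error, on the order of 1e-7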
Example no. 34
def main(in_file):

    # Set up input and output file paths
    in_hdr = find_header(in_file)

    ##########################
    ##  MODIFY THIS TO GET A BETTER FILENAME
    ##########################
    out_glint_file = in_file.split('_', 1)[0] + '_glint'
    out_glint_hdr = in_file.split('_', 1)[0] + '_glint.hdr'

    img = envi.open(in_hdr, in_file)
    inmm = img.open_memmap(interleave='source', writable=False)
    wl = s.array([float(w) for w in img.metadata['wavelength']])
    if (wl[0] < 100):
        wl = wl * 1000

    fwhm = s.array([float(w) for w in img.metadata['fwhm']])
    if (wl[0] < 100):
        fwhm = fwhm * 1000

    # set up metadata and constants

    # make output glint file and open memmap
    nl = int(img.metadata['lines'])
    metadata_glint = img.metadata.copy()
    metadata_glint['bands'] = u'1'
    metadata_glint['data type'] = u'4'
    metadata_glint['band names'] = ['Glint at 900nm']
    metadata_glint['interleave'] = 'bsq'
    metadata_glint[
        'description'] = ' make_glint.py from input ATREM reflectance '
    try:
        del metadata_glint['wavelength']
        del metadata_glint['wavelength units']
        del metadata_glint['fwhm']
        del metadata_glint['raw starting band']
        del metadata_glint['raw starting sample']
        del metadata_glint['raw starting line']
        del metadata_glint['line averaging']
    except:
        pass

    out_glint = envi.create_image(out_glint_hdr,
                                  metadata_glint,
                                  ext='',
                                  force=True)
    outmm_glint = out_glint.open_memmap(interleave='source', writable=True)

    # iterate over rows
    start, fin = 0, nl

    for i in range(start, fin):

        Rw = s.array(inmm[i, :, :])
        if img.metadata['interleave'] == 'bil':
            Rw = Rw.T
        ## if the ENVI data type is unsigned 16-bit integer, just change it to be signed
        ## so that numpy doesn't interpret them as insanely high numbers (e.g., 65535)
        if int(img.metadata['data type']) == 12:
            Rw.dtype = 'int16'
        if int(img.metadata['data type']) != 4:
            Rw = Rw / s.float32(10000)

        # iterate over columns
        colstart, colfin = 0, img.ncols

        glint_frame = s.zeros((outmm_glint.shape[2], 1))

        for col in range(colstart, colfin):

            # check for land and bad data flags
            ## if Rw[col,s.argmin(abs(wl-1000))] > 0.05 or all(Rw[col,:] <= 0):
            if Rw[col, s.argmin(abs(wl - 1000))] > 0.10 or all(
                    Rw[col, :] <= 0):
                continue

            # convert to Rrs
            Rrs_raw = Rw[col, :] / s.pi

            # subtract glint
            b900 = s.argmin(abs(wl - 900.0))
            glint = max(0.0001, s.median(Rrs_raw[(b900 - 2):(b900 + 3)]))

            if all(Rrs_raw < 0):
                continue  # out of bounds data

            # write output files
            glint_state = glint
            glint_frame[col, 0] = glint_state

        outmm_glint[0, i, :] = glint_frame.reshape(img.ncols)
        if ((i % 500) == 0):
            print 'line ' + str(i + 1)

    del outmm_glint, out_glint, inmm, img
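The per-pixel glint estimate reduces to a clamped median around the 900 nm band; isolated as a sketch (wl and the Rrs row as defined above; the function name is hypothetical):

import numpy as np

def glint_at_900(wl, rrs_raw):
    b900 = np.argmin(np.abs(wl - 900.0))
    # Median over a five-band neighborhood, floored to stay positive.
    return max(0.0001, np.median(rrs_raw[b900 - 2:b900 + 3]))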
Example no. 35
def _penalty_weight(w, lmd_w):
    if lmd_w == 0.0:
        return scipy.float32(0.0)
    else:
        return lmd_w * T.sum(T.abs_(w))
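_penalty_weight returns an L1 (lasso) penalty on the weights; a sketch of the same idea with NumPy in place of Theano's T.sum/T.abs_:

import numpy as np

def penalty_weight(w, lmd_w):
    # A zero multiplier short-circuits to a float32 constant, as above.
    if lmd_w == 0.0:
        return np.float32(0.0)
    return lmd_w * np.sum(np.abs(w))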
Example no. 36
depth_filename = sys.argv[1]
sensor_width = float(sys.argv[2])
sensor_height = float(sys.argv[3])
focal_distance = float(sys.argv[4])

imagefile = jsondata['SourceFile']
depth_near = jsondata['Near']
depth_far = jsondata['Far']
depth_format = jsondata['Format']

# Read RGB image.
image = scipy.misc.imread(imagefile)

# Read depth image.
depth_image = scipy.misc.imread(depth_filename)
depth_raw = scipy.float32(depth_image[:,:,0]) / 255.0

# Convert to actual depth using the formulas from
# https://developers.google.com/depthmap-metadata/encoding
if depth_format == 'RangeLinear':
    depth = depth_raw * (depth_far - depth_near) + depth_near
elif depth_format == 'RangeInverse':
    depth = depth_far * depth_near / (depth_far - depth_raw * (depth_far - depth_near))
else:
    print 'Unsupported depth format: %s\n' % depth_format
    sys.exit()

# Compute 3D coordinates.
[img_height, img_width] = scipy.shape(depth)
pixel_width = sensor_width / img_width
pixel_height = sensor_height / img_height
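The two depth encodings, isolated as a pure function following the formulas above (a sketch; the name is hypothetical):

def decode_depth(depth_raw, depth_near, depth_far, depth_format):
    # depth_raw: first channel of the depth image scaled to [0, 1].
    if depth_format == 'RangeLinear':
        return depth_raw * (depth_far - depth_near) + depth_near
    if depth_format == 'RangeInverse':
        return depth_far * depth_near / (depth_far - depth_raw * (depth_far - depth_near))
    raise ValueError('Unsupported depth format: %s' % depth_format)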
Example no. 37
 def __init__(self,image_path,prestim_frames=None):
     self.path = image_path
     
     ## ini data
     self.data = self.read_image(self.path)
     self.nLines = self.data.shape[0]
     self.nPlaces = self.data.shape[1]
     
     if prestim_frames:
         Fstart,Fstop = prestim_frames
         bck = sp.average(self.data[Fstart:Fstop,:],axis=0)[sp.newaxis,:]
         self.data = (self.data - bck) / bck
     
     ## ini UI
     # Image
     im_params = {'interpolation':'none',
                  'cmap':'jet',
                  'extent':[0,self.nPlaces,self.nLines,0],
                  'origin':'upper',
                  'aspect':sp.float32(self.data.shape[1]) / self.data.shape[0]} 
                  
     AxesImage = plt.imshow(self.data,**im_params)
     self.im_ax = AxesImage.axes
     self.im_fig = AxesImage.figure
     self.im_ax.set_xlabel('place [px]')
     self.im_ax.set_ylabel('line number')
     
     # coordinate calc
     self.pos = int(self.nPlaces/2) # is the position of the mouse pointer
     self.width = 11 # is x1 - x0
     self.xs = self.calc_x(self.pos,self.width) # is a tuple (x0,x1) along which is sliced
     
     # add patch
     rect_params = {'facecolor':'red',
                    'alpha':0.5}
                    
     self.Rect = Rectangle(self.xs,self.width,self.nLines,**rect_params)
     self.im_ax.add_patch(self.Rect)
     
     # extracted traces preview
     self.traces_fig = plt.figure()
     self.traces_ax = self.traces_fig.add_subplot(111)
     tempTrace_params = {'linewidth':2,
                         'color':'red'}
                         
     self.tempTrace, = self.traces_ax.plot(sp.zeros(self.nLines),**tempTrace_params)
     self.traces_ax.set_xlabel('line number')
     if prestim_frames:
         self.traces_ax.set_ylabel('dF/F')
     else:
         self.traces_ax.set_ylabel('intensity [au]')
 
     ## extracting info
     self.coords = []
     self.traces = []    
     
     # hooking up the interactive handles    
     self.im_fig.canvas.mpl_connect('button_press_event', self.mouse_clicked_event)
     self.im_fig.canvas.mpl_connect('scroll_event',self.scroll_event)
     self.im_fig.canvas.mpl_connect('motion_notify_event',self.mouse_moved_event)
     self.im_fig.canvas.mpl_connect('close_event', self.close_event)
     plt.show()
     pass
Example no. 38
import cPickle
import mnist_loader
import network
import numpy as np
from scipy import misc, float32
import glob
training_data1, validation_data, test_data = mnist_loader.load_data_wrapper()
#print training_data[0][0]
training_data = []
folders = glob.glob(
    "C:/Users/ajha2/Desktop/PythonProjects/MNIST_image/mnist_png/testing/*")

for folder in folders:
    files = glob.glob(folder + "/*")
    for file in files:
        img = misc.imread(file)
        img_pixel_array = []
        for i in range(0, len(img)):
            for j in range(0, len(img[0])):
                img_pixel_array.append([float32(img[i][j]) / 256])
        img_pixel_np_array = np.array(img_pixel_array, ndmin=2)
        #img_digit = np.array([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]], ndmin = 2)
        #img_digit[int(folder[-1])][0] = 1.0
        training_data.append((img_pixel_np_array, int(folder[-1])))

f = open(
    'C:/Users/ajha2/Desktop/PythonProjects/MNIST_image/testing_image_objects.save',
    'wb')

cPickle.dump(training_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
print
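The nested pixel loop can be vectorized; a sketch assuming a grayscale image and imageio in place of the long-removed scipy.misc.imread (the path is hypothetical):

import numpy as np
import imageio.v2 as imageio  # stand-in for the removed scipy.misc.imread

img = imageio.imread('digit.png')                  # hypothetical grayscale file
pixels = np.float32(img).reshape(-1, 1) / 256.0    # same scaling, no Python loops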