def crossover(m1, m2, NN):
  # Maybe could be sped up using flatten/reshape output?
  net = NN()
  r = random.randint(0, net.wi.size + net.wo.size) 
  output1 = [empty_like(net.wi), empty_like(net.wo)]
  output2 = [empty_like(net.wi), empty_like(net.wo)]
  # split at index and then recombine
  # split_ind = computerAsIndex()
  # output1 = m1[split_ind:] + m2[:split_ind]

  # flatten, recombine and reshape
  # m1 = array(m1[0].flatten(), m1[1].flatten()).flatten()
  # output1 = m1[r:] + m2[:r]
  # output1 = [output1[:net.wi.size], output1[net.wi.size:]]

  for i in xrange(len(m1)):
    for j in xrange(len(m1[i])):
      for k in xrange(len(m1[i][j])):
        if r >= 0:
          output1[i][j][k] = m1[i][j][k]
          output2[i][j][k] = m2[i][j][k]
        elif r < 0:
          output1[i][j][k] = m2[i][j][k]
          output2[i][j][k] = m1[i][j][k]
        r -= 1
  return output1, output2
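
The comments above wonder whether the element-wise loop could be sped up with flatten/reshape. Below is a hedged sketch of that idea (not from the original source): it drops the NN argument, takes the genome shapes from m1 itself, and is not claimed to be bit-for-bit equivalent to the loop version above.

import numpy as np

def crossover_flat(m1, m2):
    # Hypothetical vectorised variant of the commented-out flatten/reshape idea.
    shapes = [w.shape for w in m1]
    sizes = [w.size for w in m1]
    f1 = np.concatenate([w.ravel() for w in m1])
    f2 = np.concatenate([w.ravel() for w in m2])
    r = np.random.randint(0, f1.size + 1)   # single crossover point
    c1 = np.concatenate([f1[:r], f2[r:]])
    c2 = np.concatenate([f2[:r], f1[r:]])
    out1, out2, start = [], [], 0
    for shape, size in zip(shapes, sizes):
        out1.append(c1[start:start + size].reshape(shape))
        out2.append(c2[start:start + size].reshape(shape))
        start += size
    return out1, out2
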
Example 3
def crossover(m1, m2, NN):
  # Maybe could be sped up using flatten/reshape output?
  net = NN()
  r = random.randint(0, net.wi.size + net.wo.size) 
  output1 = [empty_like(net.wi), empty_like(net.wo)]
  output2 = [empty_like(net.wi), empty_like(net.wo)]

  for i in xrange(len(m1)):
    for j in xrange(len(m1[i])):
      for k in xrange(len(m1[i][j])):
        if r >= 0:
          output1[i][j][k][:] = m1[i][j][k]
          output2[i][j][k][:] = m2[i][j][k]
        elif r < 0:
          output1[i][j][k][:] = m2[i][j][k]
          output2[i][j][k][:] = m1[i][j][k]
        r -= 1
  return output1, output2
def calc_vec_field_fast(path_x, path_y, path_z, cube_size, cube_step):
    vector_field = 0
    # Grid for the coordinates of the field (Read about mgrid, similar to matlab)
    xc, yc, zc = mgrid[0:cube_size:cube_step, 0:cube_size:cube_step,
                       0:cube_size:cube_step]

    for m in range(path_x.shape[0]):

        # First we subtract the field grid from a point of the path
        # (vector from each grid cell to the path point)
        vector_field_matrix = array(
            [path_x[m] - xc, path_y[m] - yc, path_z[m] - zc])

        # Now we obtain the components needed to calculate the vector magnitude
        v_x = vector_field_matrix[0, :, :, :]
        v_y = vector_field_matrix[1, :, :, :]
        v_z = vector_field_matrix[2, :, :, :]
        v_x2 = v_x * v_x
        v_y2 = v_y * v_y
        v_z2 = v_z * v_z

        # Now we calculate the magnitude of the vector field
        mod_vec = sqrt(v_x2 + v_y2 + v_z2)

        # Now we obtain the tangent to that point on the route
        tangent = array([(path_x[m] - path_x[m - 1]),
                         (path_y[m] - path_y[m - 1]),
                         (path_z[m] - path_z[m - 1])])

        # And define the tangent vector field
        tan_vec_field = empty_like(vector_field_matrix)

        # We apply a scaling factor to this matrix
        scaling_factor = 10
        tan_vec_field[0, :, :, :] = (tangent[0]) * scaling_factor
        tan_vec_field[1, :, :, :] = (tangent[1]) * scaling_factor
        tan_vec_field[2, :, :, :] = (tangent[2]) * scaling_factor
        vector_field_matrix = vector_field_matrix + tan_vec_field

        # Now we compare

        if m == 0:
            # First iteration
            min_mod_vec = mod_vec.copy()
            vector_field = vector_field_matrix.copy()
        else:
            # We check whether the magnitude of the vector at any point of the
            # matrix is less than the minimum magnitude so far.
            # If it is, we replace the value.
            # That means that for this particular grid point there exists a
            # shorter way to the path, so we replace that part of the field.
            condition = (mod_vec < min_mod_vec)
            min_mod_vec[condition] = mod_vec[condition]
            vector_field[:, condition] = vector_field_matrix[:, condition]
    return vector_field, xc, yc, zc
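
A hedged usage sketch (not part of the original source): calc_vec_field_fast() above calls mgrid, array, sqrt and empty_like unqualified, so it presumably lives in a module with a star import from numpy; the synthetic helical path below is only for illustration.

import numpy as np

# Synthetic helical path inside a 10x10x10 cube, for illustration only.
t = np.linspace(0, 4 * np.pi, 50)
path_x = 5 + 3 * np.cos(t)
path_y = 5 + 3 * np.sin(t)
path_z = np.linspace(1, 9, t.size)

field, xc, yc, zc = calc_vec_field_fast(path_x, path_y, path_z,
                                        cube_size=10, cube_step=0.5)
print(field.shape)  # (3, 20, 20, 20): one 3-D vector per grid cell
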
Example 5
def arrayofanis(ra, dec, dmu, method=joshrwithrot):
    """Gets a 2d array of anisotropies, like the one presented in the poster"""

    # ra, dec
    inputs = pl.mgrid[0:360,0:180].swapaxes(0,2).swapaxes(0,1)*(2*pl.pi/360)
    outputs = pl.empty_like(inputs[:,:,0])

    for i in range(len(inputs[0,:])):
        for j in range(len(inputs[:,0])):
            outputs[j,i] = method(inputs[j,i], dmu, ra, dec)

    return outputs
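
A hypothetical usage sketch (not from the original repository): joshrwithrot is not shown in this listing, so a dummy anisotropy function is passed instead just to exercise the grid loop; pl is assumed to be pylab, as in the function above.

import pylab as pl

def dummy_method(radec, dmu, ra, dec):
    # Toy scalar value per grid cell, for illustration only.
    return pl.cos(radec[0] - ra) * pl.cos(radec[1] - dec) * dmu

grid = arrayofanis(ra=1.0, dec=0.5, dmu=0.01, method=dummy_method)
print(grid.shape)  # (360, 180): one value per (ra, dec) grid cell
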
Example 7
def test_decodefft():
    from time import time

    class finf:
        pass

    finf.num_chan = 1
    finf.num_hei = 300
    numbauds = 13
    finf.subcode = py.array([[1., 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1],
                             [-1., -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1,
                              -1]])
    data1 = py.random([6, finf.num_hei, 128])
    factor = 5.
    data1[0, :numbauds, 0] += factor * finf.subcode[0]
    data1[0, 50:50 + numbauds, 0] += factor * finf.subcode[0]

    data2 = py.random([6, finf.num_hei, 128])
    data2[0, 50:50 + numbauds, 0] += factor * finf.subcode[0]
    datas = (data1, data2)
    #    prevfs = py.rcParams['font.size']
    py.rcParams['font.size'] = 8  #this is just to make label lines close
    for i in range(2):
        data = datas[i].copy()
        fig = py.figure()
        nrows = 5

        ax = fig.add_subplot(nrows, 1, 1)
        ax.plot(data[0, :, 0], '.-')
        ax.legend(['data'], loc=2)
        ax.set_xlim([0, finf.num_hei])

        ax = fig.add_subplot(nrows, 1, 2)
        decodedata3 = py.empty_like(data)
        stime = time()
        #        for ch in range(data.shape[0]):
        #            decodedata3[ch,:,:] = decoder_ht(data[ch,:,:],finf.subcode[0],-1)
        decodedata3 = decoder_ht(data, finf.subcode, 1)
        dtime = time() - stime
        ax.plot(abs(decodedata3[0, :, 0]), '.-')
        ax.legend(['decoder %f ms' % (dtime * 1000)], loc=2)
        ax.set_xlim([0, finf.num_hei])
        #data with code in the middle

        data = datas[i].copy()
        stime = time()
        decodedata1 = decodefft(finf, data)
        dtime = time() - stime
        ax = fig.add_subplot(nrows, 1, 3)
        ax.plot(abs(decodedata1[0, :, 0]), '.-')
        ax.legend(['decodefft %f ms' % (dtime * 1000)], loc=2)
        ax.set_xlim([0, finf.num_hei])
        data = datas[i].copy()
        decodedata2 = decodefft(finf, data, dropheights=True)

        data = datas[i].copy()
        ax = fig.add_subplot(nrows, 1, 4)
        ax.plot(abs(decodedata2[0, :, 0]), '.-')
        ax.legend(['decodefft dropping'], loc=2)
        ax.set_xlim([0, finf.num_hei])

        ax = fig.add_subplot(nrows, 1, 5)
        ax.plot(abs(decodedata1[0, :, 0]) - abs(decodedata3[0, :, 0]), '.-')
        ax.legend(['diff. decoder & decodefft'], loc=2)
        ax.set_xlim([0, finf.num_hei])
    py.draw()
    fig.show()
Example 8
def build_fft(input_signal,
              filter_coefficients,
              threshold_windows=6,
              boundary=0):
    """generate fast transform fourier by windows
    Params :
        input_signal : the audio signal
        filter_coefficients : coefficients of the chirplet bank
        threshold_windows : calcul the size of the windows
        boundary : manage the bounds of the signal
    Returns :
        fast Fourier transform applied by windows to the audio signal

    """
    num_coeffs = filter_coefficients.size
    #print(n,boundary,M)
    half_size = num_coeffs // 2
    signal_size = input_signal.size
    #power of 2 to apply fast fourier transform
    windows_size = 2**ceil(log2(num_coeffs * (threshold_windows + 1)))
    number_of_windows = floor(signal_size // windows_size)

    if number_of_windows == 0:
        return fft_based(input_signal, filter_coefficients, boundary)

    windowed_fft = empty_like(input_signal)
    #pad with 0 to have a size that is a power of 2
    windows_size = int(windows_size)

    zeropadding = np.lib.pad(filter_coefficients,
                             (0, windows_size - num_coeffs),
                             'constant',
                             constant_values=0)

    h_fft = fft(zeropadding)

    #index used to traverse the whole signal
    current_pos = 0

    #apply fft to a part of the signal. This part has a size which is a power
    #of 2
    if boundary == 0:  #ZERO PADDING

        #window is half padded with zeros since it's focused on the first half
        window = input_signal[current_pos:current_pos + windows_size -
                              half_size]
        zeropaddedwindow = np.lib.pad(window, (len(h_fft) - len(window), 0),
                                      'constant',
                                      constant_values=0)
        x_fft = fft(zeropaddedwindow)

    elif boundary == 1:  #SYMMETRIC
        window = concatenate([
            flipud(input_signal[:half_size]),
            input_signal[current_pos:current_pos + windows_size - half_size]
        ])
        x_fft = fft(window)

    else:
        x_fft = fft(input_signal[:windows_size])

    windowed_fft[:windows_size - num_coeffs] = (ifft(
        x_fft * h_fft)[num_coeffs - 1:-1]).real

    current_pos += windows_size - num_coeffs - half_size
    #apply fast Fourier transform to each window
    while current_pos + windows_size - half_size <= signal_size:

        x_fft = fft(input_signal[current_pos - half_size:current_pos +
                                 windows_size - half_size])
        #Suppress the warning, work on the real/imaginary part
        windowed_fft[current_pos:current_pos + windows_size -
                     num_coeffs] = (ifft(x_fft * h_fft)[num_coeffs -
                                                        1:-1]).real
        current_pos += windows_size - num_coeffs
    # print(countloop)
    #apply fast fourier transform to the rest of the signal
    if windows_size - (signal_size - current_pos + half_size) < half_size:

        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window,
            (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant',
            constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = roll(ifft(
            x_fft * h_fft), half_size)[half_size:half_size +
                                       windowed_fft.size - current_pos].real
        windowed_fft[-half_size:] = convolve(input_signal[-num_coeffs:],
                                             filter_coefficients,
                                             'same')[-half_size:]
    else:

        window = input_signal[current_pos - half_size:]
        zeropaddedwindow = np.lib.pad(
            window,
            (0, int(windows_size - (signal_size - current_pos + half_size))),
            'constant',
            constant_values=0)
        x_fft = fft(zeropaddedwindow)
        windowed_fft[current_pos:] = ifft(
            x_fft * h_fft)[num_coeffs - 1:num_coeffs + windowed_fft.size -
                           current_pos - 1].real

    return windowed_fft
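
A hedged usage sketch (not from the original source): build_fft() above uses ceil, log2, floor, empty_like, fft, ifft, concatenate, flipud, roll, convolve and np.lib.pad unqualified, so it presumably sits in a module that imports numpy as np plus star imports from numpy and numpy.fft; the comparison against np.convolve below is only for reference, not an exactness claim.

import numpy as np

rng = np.random.default_rng(0)
signal = rng.standard_normal(16384)      # audio-length test signal
coeffs = np.hanning(257)                 # stand-in for one chirplet filter

filtered = build_fft(signal, coeffs, threshold_windows=6, boundary=0)
print(filtered.shape)                    # same length as the input signal

# For reference only: a direct 'same' convolution of the same signal/filter;
# how closely the two agree depends on the boundary handling chosen above.
direct = np.convolve(signal, coeffs, 'same')
print(np.abs(filtered - direct).max())
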
Example 9
	def rtssmooth(self,Y):

		''' RTS smoother

		Arguments:
		----------
		Y: list of matrices
			observation vectors
		Returns:
		--------
		xb:list of matrices
			Backward posterior state estimates
		Pb:list of matrices
			Backward posterior covariance matrices
		xhat:list of matrices
			Forward posterior state estimates
		Phat:list of matrices
			Forward posterior covariance matrices

		'''

		# initialise
		P=self.model.P0 
		xf=self.model.x0
		# filter quantities
		xfStore =[]
		PfStore=[]

		#calculate the sigma vector weights
		Wm_i,Wc_i=self.sigma_vectors_weights()



		for y in Y:
			#calculate the sigma points matrix
			Chi=self.sigma_vectors(xf,P)
			# update sigma vectors
			Chi_update=pb.matrix(pb.empty_like(Chi))
			for i in range(Chi.shape[1]):
				Chi_update[:,i]=self.model.state_equation(Chi[:,i])	
			#calculate forward prior state estimate
			xf_=pb.sum(pb.multiply(Wm_i,Chi_update),1)
			#perturbation
			Chi_perturbation=Chi_update-xf_
			#weighting
			weighted_Chi_perturbation=pb.multiply(Wc_i,Chi_perturbation)
			#calculate forward prior covariance estimate
			Pf_=Chi_perturbation*weighted_Chi_perturbation.T+self.model.Sigma_e
			#measurement update equation
			Pyy=self.model.C*Pf_*self.model.C.T+self.model.Sigma_varepsilon 
			Pxy=Pf_*self.model.C.T
			K=Pxy*(Pyy.I)
			yhat_=self.model.C*xf_
			#calculate forward posterior state and covariance estimates
			xf=xf_+K*(y-yhat_)
			Pf=(pb.eye(self.model.nx)-K*self.model.C)*Pf_
			#store
			xfStore.append(xf)
			PfStore.append(Pf)

		# initialise the smoother
		T=len(Y)
		xb = [None]*T
		Pb = [None]*T

		xb[-1], Pb[-1] = xfStore[-1], PfStore[-1]

		## smoother
		for t in range(T-2,-1,-1):
			#calculate the sigma points matrix from filtered states
			Chi_smooth=self.sigma_vectors(xfStore[t],PfStore[t]) 
			Chi_smooth_update=pb.matrix(pb.empty_like(Chi_smooth))
			for i in range(Chi_smooth.shape[1]):
				Chi_smooth_update[:,i]=self.model.state_equation(Chi_smooth[:,i])
			
			#calculate backward prior state estimate
			xb_=pb.sum(pb.multiply(Wm_i,Chi_smooth_update),1) 
			#perturbation
			Chi_smooth_perturbation=Chi_smooth-xfStore[t] 
			Chi_smooth_update_perturbation=Chi_smooth_update-xb_ 
			#weighting
			weighted_Chi_smooth_perturbation=pb.multiply(Wc_i,Chi_smooth_perturbation) 
			weighted_Chi_smooth_update_perturbation=pb.multiply(Wc_i,Chi_smooth_update_perturbation)
			#calculate backward prior covariance
			Pb_=Chi_smooth_update_perturbation*weighted_Chi_smooth_update_perturbation.T+self.model.Sigma_e
			#calculate cross-covariance matrix
			M=weighted_Chi_smooth_perturbation*Chi_smooth_update_perturbation.T
			#calculate smoother gain
			S=M*Pb_.I
			#calculate backward posterior state and covariance estimates
			xb[t]=xfStore[t]+S*(xb[t+1]-xb_)
			Pb[t]=PfStore[t]+S*(Pb[t+1]-Pb_)*S.T

			
		return xb,Pb,xfStore,PfStore