Example #1
def freq_from_HPS(sig, fs):
    """
    Estimate frequency using harmonic product spectrum (HPS)
    
    """
    windowed = sig * blackmanharris(len(sig))

    from pylab import subplot, plot, log, copy, show

    #harmonic product spectrum:
    c = abs(rfft(windowed))
    maxharms = 8
    subplot(maxharms,1,1)
    plot(log(c))
    for x in range(2,maxharms):
        a = copy(c[::x]) # should average or take the maximum instead of decimating
        # max(c[::x],c[1::x],c[2::x],...)
        c = c[:len(a)]
        i = argmax(abs(c))
        true_i = parabolic(abs(c), i)[0]
        print('Pass %d: %f Hz' % (x, fs * true_i / len(windowed)))
        c *= a
        subplot(maxharms,1,x)
        plot(log(c))
    show()
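
The in-code comment suggests averaging or taking the maximum over each length-x block rather than plain decimation, so harmonic peaks that fall between stride samples are not lost. A minimal sketch of that idea, assuming NumPy; the helper name downsample_max is ours, not from the original project:

import numpy as np

def downsample_max(c, x):
    """Compress spectrum c by a factor x, keeping the max of each block."""
    n = len(c) // x                            # number of complete blocks
    return c[:n * x].reshape(n, x).max(axis=1)

# inside the HPS loop, instead of a = copy(c[::x]):
#     a = downsample_max(c, x)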
Example #2
    def applyCalibration(self, xs, ys, zs, x_o=0.0, x_s=1.0, y_o=0.0, y_s=1.0, z_o=0.0, z_s=1.0, xy_s=0.0, xz_s=0.0, yz_s=0.0):
        x = copy(xs)
        y = copy(ys)
        z = copy(zs)
        for i in range(len(xs)):
            xo = xs[i] - x_o
            yo = ys[i] - y_o
            zo = zs[i] - z_o
            x[i] = xo * (x_s + yo * xy_s + zo * xz_s)
            y[i] = yo * (y_s + zo * yz_s)
            z[i] = zo * z_s  # (z_s + xo * xz_s + yo * yz_s)
        return x, y, z
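
Since the per-sample loop is a fixed affine correction, the same calibration can be written without the loop. A vectorized sketch using NumPy broadcasting (a hypothetical standalone function, not part of the original class):

import numpy as np

def apply_calibration_vec(xs, ys, zs, x_o=0.0, x_s=1.0, y_o=0.0, y_s=1.0,
                          z_o=0.0, z_s=1.0, xy_s=0.0, xz_s=0.0, yz_s=0.0):
    xo = np.asarray(xs) - x_o
    yo = np.asarray(ys) - y_o
    zo = np.asarray(zs) - z_o
    x = xo * (x_s + yo * xy_s + zo * xz_s)   # x picks up the cross-axis terms
    y = yo * (y_s + zo * yz_s)
    z = zo * z_s
    return x, y, z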
Example #3
def cg(A, b, x0):
    p = b - A.dot(x0)
    r = copy(p)
    norm_r0 = norm(r)
    x = copy(x0)

    while True:
        r_k_dot_r_k = r.dot(r)
        alpha = r_k_dot_r_k / p.dot(A.dot(p))
        x += alpha * p
        r -= alpha * A.dot(p)
        beta = r.dot(r) / r_k_dot_r_k  # Fletcher-Reeves update: ratio of successive residual inner products
        p = r + beta * p
        if norm(r)/norm_r0 <= 1e-6:
            return x
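
A quick sanity check for cg, assuming A is symmetric positive definite (required for conjugate gradients) and that copy and norm come from NumPy, as the snippet implies:

import numpy as np
from numpy import copy
from numpy.linalg import norm

A = np.array([[4.0, 1.0], [1.0, 3.0]])       # a small SPD system
b = np.array([1.0, 2.0])
x = cg(A, b, np.zeros(2))
print(np.allclose(A.dot(x), b, atol=1e-5))   # True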
Example #4
def findClusterCenterRedshift(data):
    ''' Finds the center of the cluster in redshift space using the
    biweight location estimator.

    '''
    x = pyl.copy(data['redshift'].values)
    return ast.biweightLocation(x, tuningConstant=6.0)
Example #5
def get_first_init(x0,epsilon,N):
    x_new = pl.copy(x0)
    print('getting the first initial condition')
    print('fiducial initial: '+str(x0))
    # multi particle array layout [nth particle v, (n-1)th particle v , ..., 0th v, nth particle x, x, ... , 0th particle x]
    
    # we will use a change of coordinates to get the location of the particle relative to x0. First
    # we just find some random point a distance epsilon from the origin.
    # need 2N random angles
    angle_arr = pl.array([])
    purturbs = pl.array([])

    # This is just an n-sphere parametrization
    for i in range(2*N):
        angle_arr = pl.append(angle_arr,random.random()*2.0*pl.pi)
        cur_purt = epsilon
        # accumulate the product of sines of all earlier angles
        for a,b in enumerate(angle_arr[:-1]):
            cur_purt *= pl.sin(b)
        # multiply in cos of the current angle; the last coordinate keeps the bare
        # sine product so that the full vector has magnitude exactly epsilon
        if i < (2*N-1):
            cur_purt *= pl.cos(angle_arr[i])

        purturbs = pl.append(purturbs,cur_purt)

    print('sqrt of sum of squares should be epsilon -> is it? --> ' +str(pl.sqrt(pl.dot(purturbs,purturbs))))
    print('len(purturbs) == 2N ? ' +str(len(purturbs)==(2*N)))

    return x_new+purturbs
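
A simpler way to draw a random point at distance epsilon, often used instead of explicit hypersphere angles, is to normalize an isotropic Gaussian vector. A sketch under that assumption (not from the original code):

import numpy as np

def random_perturbation(epsilon, dim):
    """Random vector of length epsilon, uniform over directions in `dim` dimensions."""
    v = np.random.randn(dim)              # isotropic Gaussian direction
    return epsilon * v / np.linalg.norm(v)

# x_new + random_perturbation(epsilon, 2 * N) would replace the angle loop above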
Example #6
def freq_from_HPS(sig, fs):
    """
    Estimate frequency using harmonic product spectrum (HPS)

    """
    windowed = sig * blackmanharris(len(sig))

    from pylab import subplot, plot, log, copy, show

    # harmonic product spectrum:
    c = abs(rfft(windowed))
    maxharms = 8
    subplot(maxharms, 1, 1)
    plot(log(c))
    for x in range(2, maxharms):
        a = copy(c[::x])  # should average or take the maximum instead of decimating
        # max(c[::x],c[1::x],c[2::x],...)
        c = c[:len(a)]
        i = argmax(abs(c))
        true_i = parabolic(abs(c), i)[0]
        print('Pass %d: %f Hz' % (x, fs * true_i / len(windowed)))
        c *= a
        subplot(maxharms, 1, x)
        plot(log(c))
    show()
Example #7
def findcurve(psi1,psi2,n=3,nn_fit=4,nn_out=100):
    '''
    Function to find the elastica curve for start and end orientations
    psi1 and psi2. It finds the best curve across all directions from start
    and end, i.e. the direction independent elastica curve.
    
    Inputs
    ------------
    psi1,psi2: start and end orientations.
    n:     degree of estimation polynomial.
    nn:    number of points on the curve.
             - nn_fit: for fitting purposes
             - nn_out: for the output
    
    Outputs
    ------------
    Returns a tuple (s,psi,E).
    s:   points on the curve.
    psi: curvature of the curve as a function of s.
    E:   curvature energy of the curve
    '''
    
    # define the starting conditions
    a0 = pl.zeros(n+1) 
    
    # Set a high energy: 
    E_best = 10000  
    
    # and predefine output curve
    s       = pl.linspace(0,1,nn_out) # points on the curve
    psi_out = pl.zeros(nn_out)        # curvature at points in curve
    
    
    # across all the start and end directions find the curve with the lowest energy    
    for dpsi1 in (-pl.pi,0,pl.pi):
        for dpsi2 in (-pl.pi,0,pl.pi):
            # For the starting variables,
            # the first two polynomial coefficients can be estimated from the Sharon paper derivation.
            # For different starting variables the solution can be hard to find
            a0[-2] = 4*(   pl.arcsin(- (pl.sin(psi1+dpsi1)+ pl.sin(psi2+dpsi2))/4)    -(psi1+dpsi1+psi2+dpsi2)/2       )
            a0[-1] = 2*a0[-2]/pl.cos( (psi1+dpsi1+psi2+dpsi2)/2 + a0[-2]/4  )               
            
            # find the best variables to minimize the elastica energy
            fit = fsolve(errors,a0,args=(psi1+dpsi1,psi2+dpsi2,nn_fit))
    
            # find the curve and its derivative for the fitted variables
            a    = fit[:-1]
            psi  = Psi(a,s,psi1+dpsi1,psi2+dpsi2)
            dpsi = dPsi(a,s,psi1+dpsi1,psi2+dpsi2)
    
            # find the energy of this curve
            E = sum(dpsi**2)*s[1]
            
            # check against the lowest energy
            if E_best > E:
                E_best = E
                psi_out[:] = pl.copy(psi)    
    
    return (s,psi_out,E_best)
Example #8
def renormalize(x_unpurt,x_before,x_purt,epsilon,N):
    # BEFORE ANYTHING: make sure particles near boundaries are shuffled into places where the
    # seam is not between any perturbed and fiducial trajectories.
    x_unpurt,x_purt = shuff(x_unpurt,x_purt,N)

    # The trajectory we are going to be returning is going to be the new one for the next run. lets
    # call it
    x_new = pl.copy(x_unpurt)
    # copied it because we are going to add small amounts to it to perturb it.

    # lets find a vector pointing in the direction of the trajectory's path. For this we need the
    # fiducial point at t-dt, which is given to us in the function as x_before. find the vector
    # between x_before and x_unpurt
    traj_vec = x_unpurt-x_before 
    # normalize it
    traj_vec = traj_vec/pl.sqrt(pl.dot(traj_vec,traj_vec))
    print('traj_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(traj_vec,traj_vec))))

    # Now lets see how close the vector pointing from the fiducial to the perturbed trajectory is
    # to orthogonal with the trajectory... the dot product should get closer to 0 as we check more,
    # because the difference vector aligns with the axis of greatest expansion, which should
    # be orthogonal to the trajectory.
    # First normalize the difference vector
    diff_vec = x_unpurt - x_purt
    # normalize it
    diff_vec = diff_vec/pl.sqrt(pl.dot(diff_vec,diff_vec))
    print('diff_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(diff_vec,diff_vec))))
    print('normalized(x_unpurt-x_purt)dot(traj_vec)  (should get close to 0): '+ str(pl.dot(diff_vec,traj_vec)))

    # for now lets just return a point moved back along the difference vector. no Gram-Schmidt or
    # anything.
    return x_new + epsilon*diff_vec
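
This rescaling is the classic Benettin step for estimating the largest Lyapunov exponent: after each interval the separation is measured, logged, and reset to epsilon along the current difference direction. A self-contained toy version of the accumulation loop (the map `evolve` is a stand-in we chose for illustration, not the project's integrator):

import numpy as np

def evolve(x):
    """Toy chaotic system: the logistic map at r = 4."""
    return 4.0 * x * (1.0 - x)

epsilon = 1e-8
x_fid, x_pert = 0.3, 0.3 + epsilon
log_sum, n_steps = 0.0, 1000
for step in range(n_steps):
    x_fid, x_pert = evolve(x_fid), evolve(x_pert)
    d = abs(x_pert - x_fid)
    log_sum += np.log(d / epsilon)
    x_pert = x_fid + (epsilon / d) * (x_pert - x_fid)  # renormalize, as in renormalize() above
print(log_sum / n_steps)   # approaches ln 2 for the logistic map at r = 4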
Example #9
def my_prepADCcalib_CrsFn(ADCcalibFilePath, ADCcalibFilePrefix, NGroup):
    ''' TS1.0 calibration: .h5 ADCcalib filepath,prefix => 4x numpy arrays (Crs/Fn, gain/offset)
    prefix is file name without _Coarse/Fine Gain/Offset Array.h5
    '''
    #
    # load data from h5
    ADCcalibFile_CoarseGain = ADCcalibFilePath + ADCcalibFilePrefix + '_CoarseGainArray.h5'
    ADCcalibFile_CoarseOffset = ADCcalibFilePath + ADCcalibFilePrefix + '_CoarseOffsetArray.h5'
    ADCcalibFile_FineGain = ADCcalibFilePath + ADCcalibFilePrefix + '_FineGainArray.h5'
    ADCcalibFile_FineOffset = ADCcalibFilePath + ADCcalibFilePrefix + '_FineOffsetArray.h5'
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseGain, 'r')
    # read each dataset while its file is still open, then close the file
    ADCcalib_CoarseGain_160x7Array = numpy.array(my5hfile['/data/data/'])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseOffset, 'r')
    ADCcalib_CoarseOffset_160x7Array = numpy.array(my5hfile['/data/data/'])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineGain, 'r')
    ADCcalib_FineGain_160x7Array = numpy.array(my5hfile['/data/data/'])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineOffset, 'r')
    ADCcalib_FineOffset_160x7Array = numpy.array(my5hfile['/data/data/'])
    my5hfile.close()
    #
    ADCcalibArr_CoarseGain = pylab.copy(ADCcalib_CoarseGain_160x7Array)
    ADCcalibArr_CoarseOffset = pylab.copy(ADCcalib_CoarseOffset_160x7Array)
    ADCcalibArr_FineGain = pylab.copy(ADCcalib_FineGain_160x7Array)
    ADCcalibArr_FineOffset = pylab.copy(ADCcalib_FineOffset_160x7Array)
    for iGroup in range(NGroup - 1):
        # -1 because there is already a copy of it
        ADCcalibArr_CoarseGain = pylab.concatenate(
            (ADCcalibArr_CoarseGain, ADCcalib_CoarseGain_160x7Array))
        ADCcalibArr_CoarseOffset = pylab.concatenate(
            (ADCcalibArr_CoarseOffset, ADCcalib_CoarseOffset_160x7Array))
        ADCcalibArr_FineGain = pylab.concatenate(
            (ADCcalibArr_FineGain, ADCcalib_FineGain_160x7Array))
        ADCcalibArr_FineOffset = pylab.concatenate(
            (ADCcalibArr_FineOffset, ADCcalib_FineOffset_160x7Array))
    #
    return ADCcalibArr_CoarseGain, ADCcalibArr_CoarseOffset, ADCcalibArr_FineGain, ADCcalibArr_FineOffset
Example #10
def my_prepADCcalib_CrsFn(ADCcalibFilePath, ADCcalibFilePrefix, NGroup):
    """ TS1.0 calibration: .h5 ADCcalib filepath,prefix => 4x numpy arrays (Crs/Fn, gain/offset)
    prefix is file name without _Coarse/Fine Gain/Offset Array.h5
    """
    #
    # load data from h5
    ADCcalibFile_CoarseGain = ADCcalibFilePath + ADCcalibFilePrefix + "_CoarseGainArray.h5"
    ADCcalibFile_CoarseOffset = ADCcalibFilePath + ADCcalibFilePrefix + "_CoarseOffsetArray.h5"
    ADCcalibFile_FineGain = ADCcalibFilePath + ADCcalibFilePrefix + "_FineGainArray.h5"
    ADCcalibFile_FineOffset = ADCcalibFilePath + ADCcalibFilePrefix + "_FineOffsetArray.h5"
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseGain, "r")
    # read each dataset while its file is still open, then close the file
    ADCcalib_CoarseGain_160x7Array = numpy.array(my5hfile["/data/data/"])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseOffset, "r")
    ADCcalib_CoarseOffset_160x7Array = numpy.array(my5hfile["/data/data/"])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineGain, "r")
    ADCcalib_FineGain_160x7Array = numpy.array(my5hfile["/data/data/"])
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineOffset, "r")
    ADCcalib_FineOffset_160x7Array = numpy.array(my5hfile["/data/data/"])
    my5hfile.close()
    #
    ADCcalibArr_CoarseGain = pylab.copy(ADCcalib_CoarseGain_160x7Array)
    ADCcalibArr_CoarseOffset = pylab.copy(ADCcalib_CoarseOffset_160x7Array)
    ADCcalibArr_FineGain = pylab.copy(ADCcalib_FineGain_160x7Array)
    ADCcalibArr_FineOffset = pylab.copy(ADCcalib_FineOffset_160x7Array)
    for iGroup in range(NGroup - 1):
        # -1 because there is already a copy of it
        ADCcalibArr_CoarseGain = pylab.concatenate((ADCcalibArr_CoarseGain, ADCcalib_CoarseGain_160x7Array))
        ADCcalibArr_CoarseOffset = pylab.concatenate((ADCcalibArr_CoarseOffset, ADCcalib_CoarseOffset_160x7Array))
        ADCcalibArr_FineGain = pylab.concatenate((ADCcalibArr_FineGain, ADCcalib_FineGain_160x7Array))
        ADCcalibArr_FineOffset = pylab.concatenate((ADCcalibArr_FineOffset, ADCcalib_FineOffset_160x7Array))
    #
    return ADCcalibArr_CoarseGain, ADCcalibArr_CoarseOffset, ADCcalibArr_FineGain, ADCcalibArr_FineOffset
Example #11
def __my_cumsum(iters):
    cumsum = 0
    iterres = pl.copy(iters)
    for j, iter_temp in enumerate(iters):
        if j > 0 and iter_temp == 0:
            cumsum = iterres[j - 1]
            iterres[j - 1] -= 0.00
            iterres[j] += cumsum + 0.00
        else:
            iterres[j] += cumsum
    return iterres
Example #12
def get_note(region, sample_rate):

	# Piano note.
	a4_pitch = 440

	# A list of frequencies can be found here:
	# http://en.wikipedia.org/wiki/Piano_key_frequencies

	# A sample of FFTs per piano key can be found here:
	# https://www.youtube.com/watch?v=5xjD6SRY8Pg

	# The perceived note that we hear and associate with a certain frequency
	# is actually a series of harmonic peaks. The fundamental frequency
	# might be missing from the FFT.

	# There are three ways that I know of to get a note through the FFT:
	# Look at peaks: I don't think this is happening without prior knowledge
	# of what the note will be (which we will have...)
	# Maybe we could look into some sort of machine learning for this.
	# Autocorrelation: Never looked into this.
	# Harmonic product spectrum: I'm using this for now.
	# Basic idea -- http://cnx.org/content/m11714/latest/

	# TBH I think this is the wrong approach, but I don't know what approach
	# to take given the notes we have from the MusicXML file.

	# Compress the FFT to 1/2, 1/3 & 1/4 of its size.
	# Could be made more efficient or something but whatever.
	max_harmonics = 4
	original_freqs = fft(region, sample_rate)
	hps_result = copy(original_freqs)

	for i in range(2, max_harmonics + 1):
		compressed_freqs = copy(original_freqs[::i])
		hps_result = hps_result[:len(compressed_freqs)]
		hps_result *= compressed_freqs

	# Find resulting peak here.
	return find_peaks(hps_result, 0.5)[0]  # take the first detected peak
Example #13
    def updateOut(self):
        for p in self.out.keys():
            if p in self.keys:
                tmp = getattr(self, p)
            elif p in self.evaMod.keys:
                tmp = getattr(self.evaMod, p)
            elif p in self.evaCon.keys:
                tmp = getattr(self.evaCon, p)
            else:
                print('Error in updateOut with %s.' % p)

            self.out[p].append(copy(tmp))
Example #14
    def GetPointFromMouse(self, image):
        #Copy image
        drawImage1 = copy(image)            

        #Make figure
        fig = figure("Point selection")

        title("Click on a plane in the image")

        #Show image and request input
        imshow(drawImage1)
        point = fig.ginput(1, -1)    
        return point
Example #15
    def imagesc2(self,
                 data,
                 newfig=True,
                 str='',
                 ax=1,
                 cbar=1,
                 txt=False,
                 txtprec=2,
                 txtsz=18,
                 txtnz=0,
                 txtrev=0,
                 labels=None,
                 **kwargs):
        kwargs = self.check_kwargs(kwargs, DEFAULT_IMAGESC_KWARGS)
        if newfig:
            fig = P.figure()
        data = P.copy(data)
        if len(data.shape) < 2:
            data = P.atleast_2d(data)

    #    if txtnz:
    #        kwargs['vmin'] = data[where(data>txtnz)].min()

        P.imshow(data, **kwargs)
        if cbar: P.colorbar()
        if labels is not None:
            P.xticks(np.arange(len(labels)), labels)
            P.yticks(np.arange(len(labels)), labels)
        if txt:
            thr = data.min() + (data.max() - data.min()) / 2.0
            for a in range(data.shape[0]):
                for b in range(data.shape[1]):
                    if data[a, b] < thr:
                        col = P.array([1, 1, 1])
                    else:
                        col = P.array([0, 0, 0])
                    if txtrev:
                        col = 1 - col
                    d = data[a, b].round(txtprec)
                    if txtprec == 0:
                        d = int(d)
                    if not txtnz or txtnz and d > txtnz:  # scale only non-zero values
                        P.text(b - 0.125,
                               a + 0.125,
                               d,
                               color=col,
                               fontsize=txtsz)
        plt.title(str, fontsize=12)
        return fig
Example #16
    def applyCalibration(self,readings, calibration=None):
        def offsetAndScale(min,max):
            offset = (max + min) / 2.0
            oMax = max - offset
            oMin = min - offset
            scale = 2.0 / (oMax - oMin)
            return offset,scale

        if calibration is not None:
            offset = calibration[0]
            scale = calibration[1]
        else:
            offset,scale = offsetAndScale(min(readings), max(readings))

        data = copy(readings)
        for i in range(len(data)):
            data[i] = (data[i] - offset) * scale
        return data, offset, scale
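
With no calibration supplied, offsetAndScale maps the span of the readings onto [-1, 1]. A hedged usage sketch, with `sensor` standing in for a hypothetical instance of the surrounding class:

readings = [10.0, 12.0, 14.0, 18.0]
data, offset, scale = sensor.applyCalibration(readings)
print(offset, scale)   # 14.0 0.25, so data now spans [-1.0, 1.0]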
Example #17
def freq_from_HPS(sig, fs):
    """
    Estimate frequency using harmonic product spectrum (HPS)
    """
    windowed = sig * signal.blackmanharris(len(sig))

    # harmonic product spectrum:
    c = abs(rfft(windowed))
    maxharms = 6
    plot(log(c))
    arr = np.zeros(maxharms)
    for x in range(2, maxharms):
        a = copy(c[::x])  # should average or take the maximum instead of decimating
        #max(c[::x],c[1::x],c[2::x],...)
        c = c[:len(a)]
        i = np.argmax(abs(c))
        true_i = parabolic(abs(c), i)[0]
        arr[x] = fs * true_i / len(windowed)
        c *= a
    return arr[3]
Example #18
def get_max_harmonics(sig: array, fs: float, maxharms: int):
    """
    Estimate peak frequency using harmonic product spectrum (HPS)
    Harmonic product spectrum: Measures the maximum coincidence for harmonics for each spectral frame
    Search for a maximum value of a range of possible fundamental frequencies
    Args:
        sig (array): Signal
        fs (float): Sampling rate of the signal
        maxharms (int): Max harmonics
    """
    c, pitch = hps(sig, fs, maxharms)
    for x in range(2, maxharms):
        a = copy(c[::x])  # should average or take the maximum instead of decimating
        c = c[:len(a)]
        i = np.argmax(abs(c))
        c *= a
        plt.title(
            "Max Harmonics for the range of %d times the fundamental frequencies"
            % x)
        plt.plot(maxharms, x)
        plt.plot(np.log(c))
    show()
Example #19
def social_optimum_range(network,r_range):
    new_flow = [0.]*n_links(network)
    j = 0
    last_flow_amount = 0
    last_cap = link_c(network_link(network,j))
    states = []
    steps = [x - y for x,y in zip(r_range,[0] + list(r_range[:-1]))]
    for step in steps:
        new_flow = copy(new_flow)
        while True:
            if last_cap - last_flow_amount > step:
                last_flow_amount+=step
                new_flow[j]=last_flow_amount
                break
            new_flow[j]=last_cap
            step-=(last_cap - last_flow_amount)
            j+=1
            if j == n_links(network):
                raise Exception('not enough cap')
            last_flow_amount = 0
            last_cap = link_c(network_link(network,j))
        states.append([link_state(flow,MODE_FF) for flow in new_flow])
    return states
Example #20
  def refine(self, edge_errors, gamma=1.4):
    """
    This function iterates through the cells in the mesh, then refines
    the mesh based on the relative error and the cell's location in the
    mesh.
    
    :param edge_errors : Dolfin edge function containing edge errors of
                         the current mesh.
    :param gamma       : Scaling factor for determining which edges need to be
                         refined.  This is determined by the average error
                         of the edge_errors variable.
    """
    mesh = self.mesh
    
    mesh.init(1,2)
    mesh.init(0,2)
    mesh.init(0,1)
    
    avg_error                 = edge_errors.array().mean()
    error_sorted_edge_indices = p.argsort(edge_errors.array())[::-1]
    refine_edge               = FacetFunction('bool', mesh)
    for e in edges(mesh):
      refine_edge[e] = edge_errors[e] > gamma*avg_error

    coordinates = p.copy(self.mesh.coordinates())      
    current_new_vertex = len(coordinates)
    cells_to_delete = []
    new_cells = []

    for iteration in range(refine_edge.array().sum()):
      for e in facets(self.mesh):
        if refine_edge[e] and (e.index()==error_sorted_edge_indices[0]):
          adjacent_cells = e.entities(2)
          adjacent_vertices = e.entities(0)
          if not any([c in cells_to_delete for c in adjacent_cells]):
            new_x,new_y = e.midpoint().x(),e.midpoint().y()
            coordinates = p.vstack((coordinates,[new_x,new_y]))
            for c in adjacent_cells:
              off_facet_vertex = list(self.mesh.cells()[c])
              for ii in adjacent_vertices:
                off_facet_vertex.remove(ii)
              for on_facet_vertex in adjacent_vertices:
                new_cell = p.sort([current_new_vertex,off_facet_vertex[0],on_facet_vertex])
                new_cells.append(new_cell)
              cells_to_delete.append(c)
            current_new_vertex+=1
      error_sorted_edge_indices = error_sorted_edge_indices[1:]

    old_cells = self.mesh.cells()
    keep_cell = p.ones(len(old_cells))
    keep_cell[cells_to_delete] = 0
    old_cells_parsed = old_cells[keep_cell.astype('bool')]
    all_cells = p.vstack((old_cells_parsed,new_cells))
    n_cells = len(all_cells)

    e = MeshEditor()
    refined_mesh = Mesh()
    e.open(refined_mesh,self.mesh.geometry().dim(),self.mesh.topology().dim())
    e.init_vertices(current_new_vertex)
    for index,x in enumerate(coordinates):
      e.add_vertex(index,x[0],x[1])
  
    e.init_cells(n_cells)
    for index,c in enumerate(all_cells):
      e.add_cell(index,c.astype('uintc'))

    e.close()
    refined_mesh.order()
    self.mesh = refined_mesh 
Example #21
  def weighted_smoothing(self, edge_errors, omega=0.1):
    """
    Smooths the points contained within the mesh
    
    :param edge_errors : Dolfin edge function containing the calculated
                         edge errors of the mesh
    :param omega       : Weighting factor used to refine the mesh
    """
    mesh  = self.mesh
    coord = mesh.coordinates()

    adjacent_points = {}
    mesh.init(1,2)
    
    # Create copies of the x and y coordinates
    new_x          = p.copy(coord[:,0])
    new_y          = p.copy(coord[:,1])
    exterior_point = {}

    for v in vertices(mesh):
      adjacent_points[v.index()] = set()

    for e in facets(mesh):
      vert = e.entities(0)
      ext  = e.exterior()
      for ii in (0,1):
        adjacent_points[vert[ii]].add((vert[ii-1], edge_errors[e]))
        adjacent_points[vert[ii-1]].add((vert[ii], edge_errors[e]))
      exterior_point[vert[0]] = ext
      exterior_point[vert[1]] = ext

    for index, data in adjacent_points.items():
      x       = coord[index,0]
      y       = coord[index,1]
      x_sum   = 0.0
      y_sum   = 0.0
      wgt_sum = 0.0
      kbar    = 0.0

      for entry in list(data):
        x_p   = coord[entry[0],0]
        y_p   = coord[entry[0],1]
        error = entry[1]
        kbar += 1./len(list(data)) * error/p.sqrt( (x-x_p)**2 + (y-y_p)**2 ) 
      kbar = 0.0  # kbar is zeroed here, so the (k_ij - kbar) weights below reduce to k_ij

      for entry in list(data):
        x_p      = coord[entry[0],0]
        y_p      = coord[entry[0],1]
        error    = entry[1]
        k_ij     = error/p.sqrt( (x-x_p)**2 + (y-y_p)**2 )
        x_sum   += (k_ij-kbar) * (x_p-x)
        y_sum   += (k_ij-kbar) * (y_p-y)
        wgt_sum += k_ij
        
      if not exterior_point[index]:
        new_x[index] = x + omega * x_sum / wgt_sum
        new_y[index] = y + omega * y_sum / wgt_sum

    return new_x, new_y 
Example #22
def swap2 (M, i, j):
    temp = pylab.copy(M[:,j])
    M[:,j] = M[:,i]
    M[:,i] = temp
Example #23
def swap1(V, i, j):
    temp = pylab.copy(V[i])
    V[i] = V[j]
    V[j] = temp
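
These helpers use pylab.copy because NumPy slice indexing returns views: in swap2, M[:,j] aliases the matrix, so without the copy the temporary reflects the overwrite and the swap silently fails. A short demonstration sketch:

import numpy as np

bad = np.arange(9.0).reshape(3, 3)
temp = bad[:, 2]             # a view, not a copy
bad[:, 2] = bad[:, 0]
bad[:, 0] = temp             # temp already reflects the overwrite
print(bad[:, 0], bad[:, 2])  # identical columns: the swap failed

good = np.arange(9.0).reshape(3, 3)
swap2(good, 0, 2)            # with the copy, columns 0 and 2 exchange correctly
print(good[:, 0], good[:, 2])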
Example #24
    def banana_like(X, tau, b):
        phi_X = pl.copy(X)
        phi_X *= 30. # rescale X to match scale of other models
        phi_X[1] = phi_X[1] + b*phi_X[0]**2 - 100*b

        return mc.normal_like(phi_X, 0., tau)
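
banana_like evaluates a Gaussian log-likelihood (PyMC2's mc.normal_like, where tau is a precision) after the "banana" twisted-Gaussian change of variables, a standard stress test for MCMC samplers. A plain NumPy sketch of just the warp:

import numpy as np

def banana_transform(X, b):
    """Twist the second coordinate along a parabola in the first."""
    phi = 30.0 * np.asarray(X, dtype=float)   # rescale as in the snippet
    phi[1] += b * phi[0]**2 - 100.0 * b
    return phi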
Example #25
pylab.figure()
n = 1
p = 0
pstep = 200
while n <= nstep:
    if n >= p * pstep:
        pylab.plot(x, [u for u in psi],
                   'o-',
                   label='step (s)=' + str(p * pstep))
        pylab.title('Wave function - Quartic potential')
        pylab.xlabel('x')
        pylab.ylabel(r'$\psi$')
        pylab.legend(loc=(1.03, 0.2))
        p += 1
    # choose a random point and a random amount to change psi
    tmp = pylab.copy(psi)  #temporary wavefunction trial
    j = random.choice(range(1, N - 1))
    tmp[j] *= random.uniform(0.8, 1.2)
    # normalize and compute energy
    E[n] = 0.0
    ssq = 0.0
    for i in range(1, N - 1):
        H = -hbar**2 * (tmp[i - 1] - 2.0 * tmp[i] + tmp[i + 1]) / (2 * m *
                                                                   dx**2)
        H += V[i] * tmp[i]
        E[n] += tmp[i] * H * dx
        ssq += tmp[i]**2 * dx
    E[n] /= ssq
    # test if the trial wavefunction reduces energy
    if E[n] < E[n - 1]:
        # accept the trial: update current wavefunction
        psi = tmp
    else:
        # reject the trial and keep the previous energy
        E[n] = E[n - 1]
    n += 1
Example #26
    def refine(self, edge_errors, gamma=1.4):
        """
        This function iterates through the cells in the mesh, then refines
        the mesh based on the relative error and the cell's location in the
        mesh.

        :param edge_errors : Dolfin edge function containing edge errors of
                             the current mesh.
        :param gamma       : Scaling factor for determining which edges need to be
                             refined.  This is determined by the average error
                             of the edge_errors variable.
        """
        mesh = self.mesh

        mesh.init(1, 2)
        mesh.init(0, 2)
        mesh.init(0, 1)

        avg_error = edge_errors.array().mean()
        error_sorted_edge_indices = pl.argsort(edge_errors.array())[::-1]
        refine_edge = FacetFunction('bool', mesh)
        for e in edges(mesh):
            refine_edge[e] = edge_errors[e] > gamma * avg_error

        coordinates = pl.copy(self.mesh.coordinates())
        current_new_vertex = len(coordinates)
        cells_to_delete = []
        new_cells = []

        for iteration in range(refine_edge.array().sum()):
            for e in facets(self.mesh):
                if refine_edge[e] and (e.index()
                                       == error_sorted_edge_indices[0]):
                    adjacent_cells = e.entities(2)
                    adjacent_vertices = e.entities(0)
                    if not any([c in cells_to_delete for c in adjacent_cells]):
                        new_x, new_y = e.midpoint().x(), e.midpoint().y()
                        coordinates = pl.vstack((coordinates, [new_x, new_y]))
                        for c in adjacent_cells:
                            off_facet_vertex = list(self.mesh.cells()[c])
                            for ii in adjacent_vertices:
                                off_facet_vertex.remove(ii)
                            for on_facet_vertex in adjacent_vertices:
                                new_cell = pl.sort([
                                    current_new_vertex, off_facet_vertex[0],
                                    on_facet_vertex
                                ])
                                new_cells.append(new_cell)
                            cells_to_delete.append(c)
                        current_new_vertex += 1
            error_sorted_edge_indices = error_sorted_edge_indices[1:]

        old_cells = self.mesh.cells()
        keep_cell = pl.ones(len(old_cells))
        keep_cell[cells_to_delete] = 0
        old_cells_parsed = old_cells[keep_cell.astype('bool')]
        all_cells = pl.vstack((old_cells_parsed, new_cells))
        n_cells = len(all_cells)

        e = MeshEditor()
        refined_mesh = Mesh()
        e.open(refined_mesh,
               self.mesh.geometry().dim(),
               self.mesh.topology().dim())
        e.init_vertices(current_new_vertex)
        for index, x in enumerate(coordinates):
            e.add_vertex(index, x[0], x[1])

        e.init_cells(n_cells)
        for index, c in enumerate(all_cells):
            e.add_cell(index, c.astype('uintc'))

        e.close()
        refined_mesh.order()
        self.mesh = refined_mesh
Example #27
    def banana_like(X, tau, b):
        phi_X = pl.copy(X)
        phi_X *= 30.  # rescale X to match scale of other models
        phi_X[1] = phi_X[1] + b * phi_X[0]**2 - 100 * b

        return mc.normal_like(phi_X, 0., tau)
Example #28
    def initiate(self):
        if len(self.pp_specs) == 0:
            return False
        self.ax = self.fig.add_subplot(1, 1, 1, adjustable='box', aspect=1.0)
        self.p0 = self.pp_specs[0]  # alias
        self.ax.axis(self.p0.range)
        self.ax.set_xlabel('$RR_{n}$ [ms]')
        self.ax.set_ylabel('$RR_{n+1}$ [ms]')

        empty_rectangle = Rectangle((0, 0), 1, 1, fc="w", fill=False,
                                    edgecolor='none', linewidth=0)

        white = pl.array([255, 255, 255, 0]) / 255.0

        #x_data and y_data are the same for all items included in the pp_specs
        #array; we have to add one item for a centroid value
        x_data = pl.hstack((pl.copy(self.p0.x_data), pl.array([0])))
        y_data = pl.hstack((pl.copy(self.p0.y_data), pl.array([0])))

        colors0 = [white] * len(x_data)
        sizes0 = [self.manager.inactive_point_size] * len(x_data)

        #at the last index is a centroid
        sizes0[-1] = self.p0.centroid_point_size
        colors0[-1] = self.p0.centroid_color

        if self.p0.level == 0:
            colors0[self.p0.active_start:self.p0.active_stop] = \
                    [self.manager.active_color] * (self.p0.active_stop
                                              - self.p0.active_start)
            sizes0[self.p0.active_start:self.p0.active_stop] = \
                    [self.manager.active_point_size] * (self.p0.active_stop
                                                   - self.p0.active_start)
        else:
            if self.p0.inactive_start >= 0 and self.p0.inactive_stop >= 0:
                colors0[:self.p0.inactive_stop] = \
                        [self.manager.inactive_color] * self.p0.inactive_stop
                sizes0[:self.p0.inactive_stop] = \
                    [self.manager.inactive_point_size] * self.p0.inactive_stop
            if self.p0.active_stop >= 0:
                colors0[self.p0.inactive_stop:self.p0.active_stop] = \
                    [self.manager.active_color] * (self.p0.active_stop
                                               - self.p0.inactive_stop)
                sizes0[self.p0.inactive_stop:self.p0.active_stop] = \
                    [self.manager.active_point_size] * (self.p0.active_stop
                                                    - self.p0.inactive_stop)
            if self.p0.inactive_start_2 >= 0 and self.p0.inactive_stop_2 >= 0:
                colors0[self.p0.inactive_start_2:self.p0.inactive_stop_2] = \
                   [self.manager.inactive_color] * (self.p0.inactive_stop_2
                                               - self.p0.inactive_start_2)
                sizes0[self.p0.inactive_start_2:self.p0.inactive_stop_2] = \
                   [self.manager.inactive_point_size] *\
                     (self.p0.inactive_stop_2 - self.p0.inactive_start_2)

# for future use
#        if p.show_plot_legends == True:
#
#            if p.level == 0:
#                leg_plots = ax.legend((a_plot, c_plot),
#                                       ('current PP', "centroid"),
#                                       'upper right', scatterpoints=1)
#            else:
#                leg_plots = ax.legend((a_plot, i_plot, c_plot),
#                            ('current PP', "previous PP", "centroid"),
#                            'upper right', scatterpoints=1)  # , shadow=True)
#            leg_plots.get_frame().set_alpha(0.5)
#            ltext = leg_plots.get_texts()
#            plt.setp(ltext, fontsize=8)

        self.scatter = self.ax.scatter(x_data, y_data, c=colors0, s=sizes0,
                     edgecolors='none', animated=False)

        if self.p0.level == 0:
            time_label = get_time_label_for_miliseconds(0)
        else:
            time_label = get_time_label_for_miliseconds(self.p0.cum_inactive)
        leg_time = self.ax.legend([empty_rectangle], [time_label],
                                  loc='upper left')
        leg_time.get_frame().set_alpha(0.5)
        ltext = leg_time.get_texts()
        plt.setp(ltext, fontsize=8)
        self.ax.add_artist(leg_time)
        self.legend_text = ltext[0]
        self.legend_text.set_text(('%s [%d]') % (time_label, self.p0.idx))

        self.offsets = self.scatter.get_offsets()
        centroid = pl.array([self.p0.mean_plus, self.p0.mean_minus])
        self.offsets[-1].put(pl.arange(0, 2), centroid)
        return True
Example #29
    def weighted_smoothing(self, edge_errors, omega=0.1):
        """
        Smooths the points contained within the mesh

        Args:

          :edge_errors:  Dolfin edge function containing the calculated
                         edge errors of the mesh
          :omega:        Weighting factor used to refine the mesh
        """
        mesh = self.mesh
        coord = mesh.coordinates()

        adjacent_points = {}
        mesh.init(1, 2)

        # Create copies of the x and y coordinates
        new_x = pl.copy(coord[:, 0])
        new_y = pl.copy(coord[:, 1])
        exterior_point = {}

        for v in vertices(mesh):
            adjacent_points[v.index()] = set()

        for e in facets(mesh):
            vert = e.entities(0)
            ext = e.exterior()
            for ii in (0, 1):
                adjacent_points[vert[ii]].add((vert[ii - 1], edge_errors[e]))
                adjacent_points[vert[ii - 1]].add((vert[ii], edge_errors[e]))
            exterior_point[vert[0]] = ext
            exterior_point[vert[1]] = ext

        for index, data in adjacent_points.items():
            x = coord[index, 0]
            y = coord[index, 1]
            x_sum = 0.0
            y_sum = 0.0
            wgt_sum = 0.0
            kbar = 0.0

            for entry in list(data):
                x_p = coord[entry[0], 0]
                y_p = coord[entry[0], 1]
                error = entry[1]
                kbar += 1. / len(list(data)) * error / pl.sqrt((x - x_p)**2 +
                                                               (y - y_p)**2)
            kbar = 0.0  # kbar is zeroed here, so the (k_ij - kbar) weights below reduce to k_ij

            for entry in list(data):
                x_p = coord[entry[0], 0]
                y_p = coord[entry[0], 1]
                error = entry[1]
                k_ij = error / pl.sqrt((x - x_p)**2 + (y - y_p)**2)
                x_sum += (k_ij - kbar) * (x_p - x)
                y_sum += (k_ij - kbar) * (y_p - y)
                wgt_sum += k_ij

            if not exterior_point[index]:
                new_x[index] = x + omega * x_sum / wgt_sum
                new_y[index] = y + omega * y_sum / wgt_sum

        return new_x, new_y
Example #30
def old_renormalize(x_unpurt,x_purt,total_epsilon,N):

    xnew = pl.copy(x_unpurt)
    
    # first determine which trajectories get too small.
    # this variable is the amount by which a trajectory must grow, otherwise it is going to get set
    # to the fiducial position and probably will not grow after that
    check = total_epsilon/pl.sqrt(N)/2.0 
    
    # keep track of the index of particles that are "chaotic"
    chaotic = pl.array([])
    # keep track of their final distances too
    chaotic_dists = pl.array([])
    # now check
    for l in range(N):
        # what is the distance between the lth fiducial particle and the perturbed one?
        lth_distance,across_seam = distance(x_unpurt,x_purt,l,N)
        # if the distance is shorter across the seam of the boundary (across_seam = True) then set
        # the perturbed particle position to +- 2pi so the points are next to each other. if the
        # perturbed particle position is less than the unperturbed, add 2pi; if the other way around,
        # minus 2pi
        if across_seam:
            if x_unpurt[l+N] < x_purt[l+N]:
                x_purt[l+N] = x_purt[l+N]-2.0*pl.pi
            if x_unpurt[l+N] > x_purt[l+N]:
                x_purt[l+N] = x_purt[l+N]+2.0*pl.pi
            # check new distance
            temp_dist,temp_across = distance(x_unpurt,x_purt,l,N)
            print('fixed a distance in renormalize. New dist is: ' +str(temp_dist))
        # after all this we need to re modulus the system --> at the end of this function

        if lth_distance < check:
            # set velocity to fiducial
            xnew[l] = x_unpurt[l]
            # set position to fiducial
            xnew[N+l] = x_unpurt[N+l]
        else:
            chaotic = pl.append(chaotic,l)
            chaotic_dists = pl.append(chaotic_dists,lth_distance)

    print('number of chaotic is: ' + str(len(chaotic)))

    # If there are no chaotic particles then it is still useful to measure the
    # negative LE. To do this --> if there are no chaotic particles --> make a random set of
    # perturbed initial conditions just like what is done for the very first set of initial
    # conditions
    if len(chaotic)==0:
        for k in range(N):
            xnew = get_first_init(xnew,total_epsilon/pl.sqrt(N),k,N)

    else:
        # The best we can do is split the total epsilon up evenly between the chaotic particles.
        to_perturb = total_epsilon/pl.sqrt(float(len(chaotic)))
        # where len(chaotic) is the number of chaotic particles

        # enumerate chaotic for the indices of particles to renormalize to distance to_perturb from
        # the fiducial trajectory
        for n,m in enumerate(chaotic.astype(int)):  # cast: pl.append stores the indices as floats
            # particle m will end up a distance to_perturb from the fiducial trajectory
            # velocity
            xnew[m] = x_unpurt[m] + (to_perturb/chaotic_dists[n])*(x_purt[m]-x_unpurt[m])
            # position
            xnew[N+m] = x_unpurt[N+m] + (to_perturb/chaotic_dists[n])*(x_purt[N+m]-x_unpurt[N+m])

    # because of seam checking we need to re modulus the system
    xnew[N:]=xnew[N:]%(2.0*pl.pi)
    return xnew
Example #31
__author__ = 'Rat'

from scipy.optimize import leastsq
from pylab import array, copy

def residuals(params, actual):
    print("params: " + str(params))
    print("actual: " + str(actual))
    print("expected: " + str(exp))
    result = actual * params[0] / exp - 1.0
    print("result: " + str(result))
    return result


p0 = [0.6,0.6]

act = array([1.0,2.0,3.0,4.0])
exp = copy(act)
act *= 2.0
#print actual


result = leastsq(residuals, p0, args=(act,))  # args must be a tuple
Example #32
def adj_cross_cor(ancl):
    
    # how much are we averaging over 
    how_much = int(40.0*pl.pi/ancl.dt)

    # array to store all the values found for each value of A so we can plot them
    k_arr = pl.array([])
    # array to store sweeping variables in
    var_arr = pl.array([])

    for i,j in enumerate(ancl.list_dir):
        if 'poindat.txt' not in j: continue

        print('working with file ' + str(j))
   
        f = open(j,'r')
        # Store the parameter value from the first line
        cur_sweep_var = float(f.readline().split()[-1])
        var_arr = pl.append(var_arr,cur_sweep_var)

        # data
        data = pl.genfromtxt(f)
        f.close()

        # velocities will be the first N columns 

        # We need to sort out which particle is which column. Positions will be the next N columns.
        # Give this job to a separate function.

        # Last positions will do fine
        pos = pl.copy(data[-1,ancl.N:])

        # define array to keep indices in
        i_arr = pl.array([])

        # Now comes figuring out the indexing
        for i in range(ancl.N):
            i_arr = pl.append(i_arr,pos.argmin())
            # set the min to be more than the max so that the next min can be found but the array
            # retains its shape
            pos[pos.argmin()]=pos.max()+1
        # pl.append promotes the indices to float, so cast back to int for use as column indices
        i_arr = i_arr.astype(int)


        print('i_arr = ' + str(i_arr))

        # measure the phase correlation between adjacent particles.
        # See the paper "Spatiotemporal oscillation patterns in the collective relaxation dynamics
        # of interacting particles in periodic potentials"

        k = 0.0
        for i,j in enumerate(i_arr):
            v_i = pl.copy(data[-how_much:,j])
            #v_(i+1)
            if (i+1) == len(i_arr): 
                v_ip1 = data[-how_much:,i_arr[0]]
            else:
                v_ip1 = data[-how_much:,i_arr[i+1]]


            # These lines are just for debugging 
            numerator = (v_i*v_ip1).mean()*2.0
            print('numerator = ' +str(numerator))
            denom = ((v_i*v_i).mean() + (v_ip1*v_ip1).mean())
            print('denom = ' + str(denom))
            k_i = numerator/denom

            #k_i = (v_i*v_ip1).mean()*2.0/((v_i*v_i).mean() + (v_ip1*v_ip1).mean())

            k += k_i

        k = k/ancl.N
        k_arr = pl.append(k_arr,k)


    fig = pl.figure()
    ax = fig.add_subplot(111)
    # form of errorbar(x,y,xerr=xerr_arr,yerr=yerr_arr)
    pl.scatter(var_arr,k_arr,c='k')
    #pl.errorbar(var_arr,averages_2,yerr=std_arr,c='b',ls='none',fmt='o')
    ax.set_xlabel(ancl.sweep_str,fontsize=30)
    ax.set_ylabel(r'$K_v$',fontsize=30)
    fig.tight_layout()
    fig.savefig('adj_cross_cor.png',dpi=300)
    pl.close(fig)
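
The quantity accumulated in the loop is the adjacent-pair correlation k_i = 2<v_i v_{i+1}> / (<v_i^2> + <v_{i+1}^2>), which equals 1 for identical velocity traces and -1 for anti-phase ones. A tiny self-contained check with synthetic signals (not project data):

import numpy as np

t = np.linspace(0.0, 40.0 * np.pi, 4000)
v1 = np.sin(t)
denom = 2.0 * (v1 * v1).mean()
print(2.0 * (v1 * v1).mean() / denom)    #  1.0 for identical signals
print(2.0 * (v1 * -v1).mean() / denom)   # -1.0 for anti-phase signals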
Example #33
    def findMaximaForThisScale(self, waveletScale):
        """
        Starting with the most intense local maximum, points near it ("near" based on the
        particular wavelet scale under investigation) are removed. After
        the removal, the next most intense local maximum should be the next real peak in the
        coefficients, and the process is repeated until all points have been removed.
        
        :param waveletScale: The scale for which the maxima are desired.
        :return: A list of indices corresponding to the locations of the maxima.
        """

        # when we are removing points adjacent to the current maxima this is the
        # number of points to go in either direction before stopping.
        removeCutOff = int(round(waveletScale * 2.5))

        maximaLocations = []

        # sort and keep track of the original indices
        indexOfThisWaveletScale = self.mapScaleToIndex[waveletScale]

        curCoefficients = pl.copy(
            self.allCoefficients[indexOfThisWaveletScale])

        #if self.visualize:
        #    print "waveletScale: " + str(waveletScale)
        #    pl.plot(curCoefficients,marker='o')
        #    pl.show()

        # sorted indices will run from smallest to greatest coefficient
        indecies = sorted(range(len(curCoefficients)),
                          key=lambda k: curCoefficients[k])
        #curCoefficients.sort()

        mapIndexToBoolRemain = {}

        for i in range(0, len(indecies)):
            mapIndexToBoolRemain[indecies[i]] = True

        #for (int i = indecies.length-1; i>=0; i--){
        for i in range(len(indecies) - 1, 0 - 1, -1):
            if (mapIndexToBoolRemain[indecies[i]]):

                curLargestIndex = indecies[i]

                #if self.visualize:
                #    print "curLargestIndex: " + str(curLargestIndex)

                if (curCoefficients[curLargestIndex] <= 0.0):
                    continue

                maximaLocations.append(curLargestIndex)
                # remove points. num points to right and left equal to current scale
                mapIndexToBoolRemain[curLargestIndex] = False

                #for (int j=1; j<removeCutOff; j++){
                for j in range(1, removeCutOff):

                    curRemoveIndexRight = curLargestIndex + j
                    curRemoveIndexLeft = curLargestIndex - j

                    #if self.visualize and (j==(removeCutOff-1)):
                    #    print "curRemoveIndexRight: " + str(curRemoveIndexRight)
                    #    print "curRemoveIndexLeft: " + str(curRemoveIndexLeft)

                    if (curRemoveIndexLeft >= 0):
                        mapIndexToBoolRemain[curRemoveIndexLeft] = False

                    if (curRemoveIndexRight < len(self.x)):
                        mapIndexToBoolRemain[curRemoveIndexRight] = False

        #if self.visualize and (j==(removeCutOff-1)):
        #    print "maximaLocations: " + str(maximaLocations)

        return maximaLocations
Example #34
                dE[k] -= lattice[temp] * lattice[tneigh] * u11 + u00 * (
                    (not lattice[temp]) * (not lattice[tneigh])
                )  #+ u10*(lattice[temp] + lattice[tneigh])*(not lattice[temp]*lattice[tneigh])
    t1 = time.time()
    return dE, t1 - t0


# create 3d cubic lattice (NxNxN)
N = 40
lattice = p.zeros(N * N * N, dtype=int)  # n.int was removed in modern NumPy
# populate initially randomly with given number fraction
x1 = 0.5  # x1: fraction of ones
N_ones = int(x1 * len(lattice))  # actual number of ones
lattice[list(n.random.permutation(p.arange(len(lattice)))[:N_ones])] = 1
lattice = lattice.reshape((N, N, N))
init_lattice = p.copy(lattice)

percolator = SitePercolator3D(
    N, N, N, 0., 0.,
    0.)  # parameters are not needed, since we take the geometry from here

# let neighbors switch place with thermal activation (periodic boundary conditions)
# Metropolis algorithm for updating
maxsteps = 1000000
kT = 0.031
u11 = -.5  # in eV
u00 = -.5  # in eV
u10 = 0.  # in eV
d_utransf = 0.00  # energy barrier for place exchange, in eV

neighbor_kernel = n.array([[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0],