def VisualizeEigenfield(n=3,z=30,scale=50,\
                        a=30,*args,**kwargs):
    
    zs=numpy.linspace(0,a*scale,200)
    rs=numpy.linspace(-a*scale/2.,a*scale/2.,200)
    from common.numerics import broadcast_items
    from scipy.special import j0,j1
    
    d=get_tip_eigenbasis_expansion(z,a=a,*args,**kwargs)
    #`P` is the module-level eigenvector matrix populated (via `global`) by the call above
    field_contributions=AWA(P[:,n-1],axes=[tip.LRM.qxs],axis_names=['$q/a$']).squeeze()
    
    zs_norm=zs/float(a)
    rs_norm=rs/float(a)
    rs_norm,zs_norm=broadcast_items(rs_norm,zs_norm)
    
    total_zfield=0
    total_rfield=0
    potential=0
    for field_contrib,q,wq in zip(field_contributions,tip.LRM.qxs,tip.LRM.wqxs):
        
        zpos_dependence=numpy.exp(-q*z/float(a))*numpy.exp(-q*zs_norm)*j0(q*rs_norm)
        rpos_dependence=numpy.exp(-q*z/float(a))*numpy.exp(-q*zs_norm)*j1(q*rs_norm)
        
        total_zfield+=(-field_contrib)*zpos_dependence*wq
        total_rfield+=(-field_contrib)*rpos_dependence*wq
        potential+=field_contrib*zpos_dependence*wq/q
        
    total_zfield=AWA(total_zfield,axes=[rs,zs],axis_names=['r','z'])
    total_rfield=AWA(total_rfield,axes=[rs,zs],axis_names=['r','z'])
    potential=AWA(potential,axes=[rs,zs],axis_names=['r','z'])
    potential-=numpy.mean(potential.cslice[:,z])
    
    return total_zfield,total_rfield,potential
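#Hedged usage sketch (not part of the original routine): visualize the returned maps.
#Assumes the module-level `tip`, `AWA` and `get_tip_eigenbasis_expansion` objects used
#by `VisualizeEigenfield` are importable, as in the surrounding examples.
import numpy
import matplotlib.pyplot as plt

Ez,Er,phi=VisualizeEigenfield(n=3,z=30,a=30)
plt.figure(); plt.imshow(numpy.abs(Ez)); plt.colorbar(); plt.title('$|E_z|$ of eigenfield $n=3$')
plt.figure(); plt.imshow(phi.real); plt.colorbar(); plt.title('Potential of eigenfield $n=3$')
plt.show()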
Example #2
def RibbonTest(_N_sample_eigenbasis=100, _N_tip_eigenbasis=1):
    show_eigs = False
    run_test = True
    xs, ys = np.arange(101), np.arange(101)
    L = xs.max()
    xv, yv = np.meshgrid(xs, ys)
    eigpairs = {}

    graphene_ribbon = False
    if graphene_ribbon:
        q0 = np.pi / L  #This is for particle in box (allowed wavelength is n*2*L)
        for n in range(1, _N_sample_eigenbasis + 1):
            qx = n * q0
            pw = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=np.pi / 2),
                     axes=[xs, ys])  #cosine waves (particle-in-a-box modes)
            eigpairs[qx**2] = pw / np.sqrt(np.sum(pw**2))

    else:

        q0 = 2 * np.pi / L  #This is for infinite sample
        for n in range(1, _N_sample_eigenbasis + 1):
            qx = n * q0
            pw = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=np.pi / 2),
                     axes=[xs, ys])  #cosine waves
            eigpairs[qx**2] = pw / np.sqrt(np.sum(pw**2))

            pw2 = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=0),
                      axes=[xs, ys])  #sine waves, This is for infinite sample
            eigpairs[qx**2 + 1e-9] = pw2 / np.sqrt(np.sum(
                pw2**2))  #This is for infinite sample

    if show_eigs:
        for i, q in enumerate(list(eigpairs.keys())):
            if i < 5:
                plt.figure()
                plt.imshow(eigpairs[q])
                plt.title("q={}".format(q))
        plt.show()

    if run_test:
        q = 2 * np.pi / L * 20
        Sample = SampleResponse(eigpairs,
                                qw=q,
                                N_sample_eigenbasis=_N_sample_eigenbasis)
        Tip = TipResponse(Sample.xs,
                          Sample.ys,
                          q=q,
                          N_tip_eigenbasis=_N_tip_eigenbasis)

        d = Sample.RasterScan(Tip)
        plt.figure()
        plt.imshow(np.abs(d['P']))
        plt.title('P')
        plt.colorbar()
        plt.figure()
        plt.imshow(np.abs(d['R']))
        plt.title('R')
        plt.colorbar()
        plt.show()
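#The eigenbases above (and in Example #20 below) are built from a `planewave` helper
#that is not shown in this snippet.  A minimal stand-in for what it is assumed to
#compute -- a real 2-D sinusoid with wavevector (qx,qy), offset x0 and phase phi0, so
#that phi0=np.pi/2 yields the cosine ("particle in a box") modes and phi0=0 the sine
#modes -- could look like:
def planewave(qx, qy, xv, yv, x0=0, y0=0, phi0=0):
    """Hypothetical stand-in; the actual helper in the source package may differ."""
    return np.sin(qx * (xv - x0) + qy * (yv - y0) + phi0)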
Example #3
    def __call__(self,excitations,U,tip_eigenbasis):
        if np.array(excitations).ndim==2: excitations=[excitations]
        Exc=np.array([exc.ravel() for exc in excitations])
        tip_eb=np.matrix([eigfunc.ravel() for eigfunc in tip_eigenbasis])
        projected_result=np.dot(tip_eb.T,
                            np.dot(U,\
                                np.dot(self.D,\
                                    np.dot(self.Phis,Exc.T))))

        result=np.dot(self.Phis.T,\
                       np.dot(self.D,\
                             np.dot(self.Phis,Exc.T)))
        result=np.array(result).T.reshape((len(excitations),)+self.phishape)
        projected_result=np.array(projected_result).T.reshape((len(excitations),)+self.phishape)
        return AWA(result,axes=[None,self.xs,self.ys]).squeeze(), AWA(projected_result,axes=[None,self.xs,self.ys]).squeeze()
 def get_Erad(self,beta,zs=None,Nterms=None,interpolation='linear'):
     
     if hasattr(beta,'__len__'):
         if isinstance(beta,numpy.ndarray):
             if not beta.ndim: beta=beta.tolist()
         else: beta=numpy.array(beta)
     
     if isinstance(beta,numpy.ndarray): beta=beta.reshape((len(beta),1,1))
     
     if zs is None: zs=self.zs
     Rs=self.evaluate_residues(zs,Nterms)#,interpolation)
     Ps=self.evaluate_poles(zs,Nterms)#,interpolation)
     
     #Should broadcast over freqs if beta has an additional first axis
     #The offset term is absolutely critical, offsets false z-dependence arising from first terms
     approach=numpy.sum(Rs*(1/(beta-Ps)+1/Ps),axis=-1)
     
     axes=[zs]; axis_names=['z/a']
     if hasattr(beta,'__len__'):
         approach=approach.transpose()
         if isinstance(beta,AWA):
             axes=axes+[beta.axes[0]]
             axis_names=axis_names+[beta.axis_names[0]]
         else:
             axes=axes+[None]
             axis_names=axis_names+[None]
     
     signals=AWA(approach,axes=axes,axis_names=axis_names).squeeze()
     if not signals.ndim: signals=signals.tolist()
     
     return signals
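#Why the `+1/Ps` offset in `get_Erad` matters (illustrative check, not part of the class):
#each term Rs*(1/(beta-Ps)+1/Ps) equals Rs*beta/(Ps*(beta-Ps)), so the summed signal
#vanishes as beta->0 instead of retaining a beta-independent baseline sum(-Rs/Ps) that
#would masquerade as a z-dependence of the bare tip.
_R,_P,_beta=2.0+1j,-0.5+0.2j,1e-12
assert abs(_R*(1/(_beta-_P)+1/_P))<1e-9    #offset form vanishes with beta
assert abs(_R/(_beta-_P))>1                #un-offset form retains a finite baseline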
def get_tip_eigenbasis_expansion(z=.1,freq=1000,a=30,\
                                 smoothing=0,reload_signal=True,\
                                 *args,**kwargs):
    """Appears to render noise past the first 15 eigenvalues.  
    Smoothing by ~4 may be justified, removing zeros probably not..."""
    
    # Rely on Lightning Rod Model only to load tip data from file in its process of computing a signal #
    if reload_signal:
        tip.verbose=False
        signal=tip.LRM(freq,rp=mat.Au.reflection_p,zmin=z,amplitude=0,\
                       normalize_to=None,normalize_at=1000,Nzs=1,demodulate=False,\
                       *args,**kwargs)
        tip.verbose=True

    global L,g,M,alphas,P,Ls,Es

    #Get diagonal basis for the matrix
    L=tip.LRM.LambdaMatrix(tip.LRM.qxs)
    g=numpy.matrix(numpy.diag(-tip.LRM.qxs*numpy.exp(-2*tip.LRM.qxs*z/float(a))*tip.LRM.wqxs))
    M=AWA(L*g,axes=[tip.LRM.qxs]*2,axis_names=['q']*2)
    
    #Smooth along s-axis (first), this is where we truncated the integral xform
    if smoothing: M=numrec.smooth(M,axis=0,window_len=smoothing) 
        
    alphas,P=linalg.eig(numpy.matrix(M))
    P=numpy.matrix(P)
    
    Ls=numpy.array(P.getI()*tip.LRM.Lambda0Vector(tip.LRM.qxs)).squeeze()
    Es=numpy.array(numpy.matrix(tip.LRM.get_dipole_moments(tip.LRM.qxs))*g*P).squeeze()
    
    Rs=-Es*Ls/alphas**2
    Ps=1/alphas
    
    return {'Rs':Rs,'Ps':Ps,'Es':Es,'alphas':alphas,'Ls':Ls}
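#Hedged usage sketch (assumes the same module-level `tip`, `mat` and `numpy` objects
#used elsewhere in this file): the dictionary returned above feeds the pole-residue
#form of the radiated field, E_rad(beta) ~ sum_n Rs[n]*(1/(beta-Ps[n]) + 1/Ps[n]),
#which is how `get_Erad` and `GetExpansionApproachCurve` consume it.
def _Erad_from_expansion_sketch(beta,z=.1,a=30,Nterms=15,**kwargs):
    """Illustrative only; real use should sort/prune poles as done elsewhere in this file."""
    d=get_tip_eigenbasis_expansion(z,a=a,**kwargs)
    Rs,Ps=d['Rs'][:Nterms],d['Ps'][:Nterms]
    return numpy.sum(Rs*(1/(beta-Ps)+1/Ps))

#e.g.: beta_Au=mat.Au.reflection_p(1000,q=1/(30e-7)); Erad=_Erad_from_expansion_sketch(beta_Au)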
Example #6
 def __call__(self, x0, y0):
     shift_by_dx = x0 - self.midx
     shift_by_dy = y0 - self.midy
     shift_by_nx = int(self.shape[0] * shift_by_dx / self.dx)
     shift_by_ny = int(self.shape[1] * shift_by_dy / self.dy)
     newJ=np.roll(np.roll(self.bigF,shift_by_nx,axis=0),\
                  shift_by_ny,axis=1)
     output = newJ[self.shape[0]//2:(3*self.shape[0])//2,\
                  self.shape[1]//2:(3*self.shape[1])//2]
     return AWA(output, axes=[self.xs, self.ys])
def InvertImage(image,nodes,normalize_to=mat.Au,Nterms=8,freq=1000):
    
    row_len=image.shape[1]
    signals=uncoil_image(image)
    beta_norm=normalize_to.reflection_p(freq,q=1/(30e-7))
    norm_signal=EigenfieldModel.get_signal_from_nodes(beta=beta_norm,nodes=nodes,Nterms=Nterms)
    betas=EigenfieldModel.invert_signal(signals*norm_signal,nodes=nodes,Nterms=Nterms,select_by='pole')
    betas_image=coil_image(betas,row_len=row_len)
    
    if isinstance(image,AWA): betas_image=AWA(betas_image); betas_image.adopt_axes(image)
    
    return betas_image
def GetExpansionApproachCurve(zs=numpy.linspace(.1,150,50),a=30,beta=1,Nterms=None,\
                              *args,**kwargs):
    
    if hasattr(beta,'__len__'):
        if not isinstance(beta,numpy.ndarray): beta=numpy.array(beta)
        beta=beta.reshape((len(beta),1,1))
    
    Rs=[]
    Ps=[]
    for i,z in enumerate(zs):
        if i==0: kwargs['reload_signal']=True
        else: kwargs['reload_signal']=False
        d=get_tip_eigenbasis_expansion(z,a=a,*args,**kwargs)
        Rs.append(d['Rs'])
        Ps.append(d['Ps'])
        
    Rs=numpy.array(Rs); Ps=numpy.array(Ps)
    if not Nterms: Nterms=Ps.shape[1]
    Rs=AWA(Rs[:,:Nterms],axes=[zs,None],axis_names=['Z','Term'])
    Ps=AWA(Ps[:,:Nterms],axes=[zs,None],axis_names=['Z','Term'])
    
    approach=numpy.sum(Rs/(beta-Ps)+Rs/Ps,axis=-1)
    #beta*Rs/(poles-beta)=sig
    #0=sig+beta*Rs/(beta-poles)
    #0=sig/beta+Rs/(beta-poles)
    
    axes=[zs]; axis_names=['Z']
    if hasattr(beta,'__len__'):
        approach=approach.transpose()
        if isinstance(beta,AWA):
            axes=axes+[beta.axes[0]]
            axis_names=axis_names+[beta.axis_names[0]]
        else:
            axes=axes+[None]
            axis_names=axis_names+[None]
        
    approach=AWA(approach,axes=axes,axis_names=axis_names)
    
    return {'Rs':Rs,'Ps':Ps,'signals':approach}
 def get_signal_from_nodes(self,beta,nodes=[(0,1)],Nterms=None,interpolation='linear'):
     
     if not hasattr(beta,'__len__'): beta=[beta]
     if not isinstance(beta,AWA): beta=AWA(beta)
     
     #`Frequency` axis will be first
     if isinstance(beta,numpy.ndarray): beta=beta.reshape((len(beta),1,1))
     
     #Weights apply across z-values
     zs,ws=list(zip(*nodes))
     ws_grid=numpy.array(ws).reshape((1,len(ws),1))
     zs=numpy.array(zs)
     
     #Evaluate at all nodal points
     Rs=self.evaluate_residues(zs,Nterms,interpolation)
     Ps=self.evaluate_poles(zs,Nterms,interpolation)
     
     #Should broadcast over freqs if beta has an additional first axis
     #The offset term is absolutely critical, offsets false z-dependence arising from first terms
     approach=numpy.sum(numpy.sum(Rs*(1/(beta-Ps)+1/Ps)*ws_grid,axis=-1),axis=-1)#+Rs/Ps,axis=-1)
     
     axes=[zs]; axis_names=['z/a']
     if hasattr(beta,'__len__'):
         approach=approach.transpose()
         if isinstance(beta,AWA):
             axes=axes+[beta.axes[0]]
             axis_names=axis_names+[beta.axis_names[0]]
         else:
             axes=axes+[None]
             axis_names=axis_names+[None]
     
     signals=AWA(approach); signals.adopt_axes(beta)
     signals=signals.squeeze()
     if not signals.ndim: signals=signals.tolist()
     
     return signals
Example #10
def load_eigpairs(basedir=os.path.dirname("./"),eigpair_fname="UnitSquareMesh_100x100_1000_eigenbasis.h5"):
    """Normalization by sum always ensures that integration will be like summing, which is
    much simpler than keeping track of dx, dy..."""

    global eigpairs
    eigpairs = dict()

    path=os.path.join(basedir,eigpair_fname)

    with h5py.File(path,'r') as f:
        for key in list(f.keys()):
            eigfunc=np.array(f.get(key))
            eigfunc/=np.sqrt(np.sum(np.abs(eigfunc)**2))
            eigpairs[float(key)] = AWA(eigfunc,\
                                       axes=[np.linspace(0,1,eigfunc.shape[0]),\
                                             np.linspace(0,1,eigfunc.shape[1])])
    return eigpairs
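#Hedged usage sketch: after loading, each eigenfunction carries unit discrete norm, so
#overlap "integrals" reduce to plain pixel sums with no dx*dy bookkeeping.  Assumes the
#default HDF5 file named above is present in the working directory.
eigpairs=load_eigpairs()
q_min=min(eigpairs.keys())
print('lowest eigenvalue:',q_min)
print('discrete norm:',np.sum(np.abs(eigpairs[q_min])**2))   #should print ~1.0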
def PlotHarmonicQuadratures(beta,amplitude=2,quadratures=quadratures,\
                               harmonic=3,**kwargs):
    
    global quad_harmonic,ref_harmonic,harmonic_value
    signal=EigenfieldModel(beta=beta)
    
    harmonics={}
    for quadrature_name,pair in quadratures:
        quadrature,Nts=pair
        harmonics_this_quad=[]
        for Nt in Nts:
            harmonics_this_Nt=EigenfieldModel.demodulate(signal,quadrature=quadrature,\
                                                           amplitude=amplitude,harmonics=[harmonic],Nts=Nt,**kwargs)
            harmonic_value=harmonics_this_Nt.cslice[harmonic].squeeze().tolist()
            harmonics_this_quad.append(complex(harmonic_value))
            
        harmonics_this_quad=numpy.array(harmonics_this_quad,dtype=complex)
        harmonics[quadrature_name]=AWA(harmonics_this_quad,axes=[Nts],axis_names=['$N_{quad}$'])
    
    import itertools
    
    figure()
    markers=[None,'o','s','^','+','^']
    markers = itertools.cycle(markers)
    
    ref_quad='Gauss-Legendre'
    ref_harmonic=harmonics[ref_quad][-1]
    
    for quadrature_name in list(zip(*quadratures))[0]:
        quad_harmonic=harmonics[quadrature_name]
        marker = next(markers)
        numpy.abs((quad_harmonic-ref_harmonic)/ref_harmonic)\
                    .plot(plotter=semilogy,marker=marker,label=quadrature_name,markersize=5)
        
    ylabel(r'$S_%i\,\mathrm{rel.\,error}$'%harmonic)
    leg=legend(loc='best',fancybox=True,shadow=True)
    leg.get_frame().set_linewidth(.1)
    xlim(0,200)
    
    tight_layout()
    
    return harmonics
Example #12
    def __init__(self, eigpairs, E, N=100, debug=True):
        # Setting the easy stuff
        eigvals = list(eigpairs.keys())
        eigfuncs = list(eigpairs.values())
        self.debug = debug
        self.xs, self.ys = eigfuncs[0].axes
        self.eigfuncs = AWA(eigfuncs,\
                          axes=[eigvals,self.xs,self.ys]).sort_by_axes()
        self.eigvals = self.eigfuncs.axes[0]
        self.phishape = self.eigfuncs[0].shape
        self.E = E
        self.N = N

        # Setting the various physical quantities
        self._SetUseEigenvalues(E)
        self._SetEnergy()
        self._SetSigma(10, 10)
        self._SetCoulombKernel()
        self._SetScatteringMatrix()

        self.Us = []
def VisualizeEigenfieldDistributions(ns=[1,2,3],zs=[.1,5,15,30],a=30,*args,**kwargs):
    
    zs.sort()
    eigs=dict([(n,[]) for n in ns])
    for i,z in enumerate(zs):
        if i==0: kwargs['reload_signal']=True
        else: kwargs['reload_signal']=False
        d=get_tip_eigenbasis_expansion(z,a=a,*args,**kwargs)
        for n in ns:
            eigs[n].append(AWA(P[:,n-1],axes=[tip.LRM.qxs],axis_names=['$q/a$']).squeeze())
            
    colors=list(zip(numpy.linspace(1,0,len(zs)),\
               [0]*len(zs),\
               numpy.linspace(0,1,len(zs))))
            
    #return eigs
            
    figure()
    for i,n in enumerate(ns):
        subplot(1,len(ns),i+1)
        for j in range(len(zs)):
            numpy.abs(eigs[n][j]).plot(plotter=semilogx,\
                                       color=colors[j],\
                                       label='$z/a=%1.1f$'%(zs[j]/float(a)))
        
        props = dict(boxstyle='round', facecolor='white', alpha=0.8)
        gca().text(0.67, 0.85, '$n=%i$'%n, transform=gca().transAxes, fontsize=24,bbox=props)
        
        if i==0: ylabel('$E_n(q)$')
        else:
            yticks(numpy.arange(5)*.05,['']*5)
            xts=xticks()
            xticks(xts[0][1:])
            
        xlim(1e-4,20)
        ylim(0,.2)
        grid()
            
    tight_layout()
    subplots_adjust(wspace=0)
Example #14
    def __init__(self, eigpairs, qw=44, N_sample_eigenbasis=100):
        # Setting the easy stuff
        eigvals = list(eigpairs.keys())
        eigfuncs = list(eigpairs.values())
        self.xs, self.ys = eigfuncs[0].axes
        self.eigfuncs = AWA(eigfuncs,\
                          axes=[eigvals,self.xs,self.ys]).sort_by_axes()
        self.eigvals = self.eigfuncs.axes[0]
        self.phishape = self.eigfuncs[0].shape
        self.qw = qw
        self.N = N_sample_eigenbasis
        sigma_1, sigma_2 = np.real(np.exp(2 * np.pi * 1j * 0.05)), np.imag(
            np.exp(2 * np.pi * 1j * 0.05))
        #lambda_p, L_p = 10,10

        # Setting the various physical quantities
        self._SetAlpha(sigma_1, sigma_2)
        self._SetUseEigenvalues()
        self._SetCoulombKernel()
        self._SetScatteringMatrix()

        self.Us = []
 def demodulate(signals,amplitude=2,harmonics=list(range(4)),Nts=None,\
                quadrature=numrec.GL):
     """Takes z-axis as first axis, frequency as final axis."""
 
     global ts,wts,weights,signals_vs_time
 
     #max harmonic resolvable will be frequency = 1/dt = Nts
     if not Nts: Nts=4*numpy.max(harmonics)
     if isinstance(quadrature,str) or hasattr(quadrature,'calc_nodes'):
         ts,wts=numrec.GetQuadrature(N=Nts,xmin=-.5,xmax=0,quadrature=quadrature)
         
     else:
         ts,wts=numpy.linspace(-.5,0,Nts),None
         if quadrature is None: quadrature=simps
     
     zs=amplitude*(1+numpy.cos(2*numpy.pi*ts))
     
     harmonics=numpy.array(harmonics).reshape((len(harmonics),1))
     weights=numpy.cos(2*numpy.pi*harmonics*ts)
     if wts is not None: weights*=wts
     weights_grid=weights.reshape(weights.shape+(1,)*(signals.ndim-1))
     
     signals_vs_time=signals.interpolate_axis(zs,axis=0,bounds_error=False,extrapolate=True)
     signals_vs_time.set_axes([ts],axis_names=['t'])
     integrand=signals_vs_time*weights_grid
     
     if wts is not None:
         demodulated=2*2*numpy.sum(integrand,axis=1) #perform quadrature
     else: demodulated=2*2*quadrature(integrand,x=ts,axis=1)
 
     axes=[harmonics]; axis_names=['harmonic']
     if isinstance(signals,AWA):
         axes+=signals.axes[1:]
         axis_names+=signals.axis_names[1:]
     demodulated=AWA(demodulated,axes=axes,axis_names=axis_names)
     
     return demodulated
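#Hedged numpy-only sketch of what the quadrature above computes: the n-th harmonic is
#S_n = 4 * integral_{-1/2}^{0} signal(z(t)) * cos(2*pi*n*t) dt, with the tapping
#trajectory z(t) = A*(1+cos(2*pi*t)).  For a test signal that is linear in z, only the
#n=0,1 coefficients are appreciable (~2A and ~A respectively).
import numpy
_A=2.
_ts=numpy.linspace(-.5,0,2001)
_zs=_A*(1+numpy.cos(2*numpy.pi*_ts))
_signal_vs_time=_zs                         #test signal S(z)=z evaluated on the trajectory
for _n in range(4):
    _Sn=4*numpy.sum(_signal_vs_time*numpy.cos(2*numpy.pi*_n*_ts))*(_ts[1]-_ts[0])
    print('S_%i ~ %1.3f'%(_n,_Sn))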
Example #16
 def __call__(self, x0, y0):
     tip_eb = [t(x0, y0) for t in self.eigenbasis_translators]
     return AWA(tip_eb, axes=[None, tip_eb[0].axes[0], tip_eb[0].axes[1]])
Example #17
def CompareCarbonylTips(Ls=numpy.linspace(30,20e3,100),a=30,\
                                  wavelength=6e3,amplitude=60,lift=30,\
                                  geometries=['cone','hyperboloid'],\
                                  taper_angles=[20],\
                                  load=True,save=True,demo_file='CarbonylTips.pickle',\
                                  enhancement_index=2):

    carbonyl_freqs = numpy.linspace(1730, 1750, 10)
    normalize_at_freq = numpy.mean(carbonyl_freqs)
    freqs = [a / float(wavelength)]  #load frequency should be quite close to `sio2_freq`
    Ls = Ls / float(a)
    freq_labels = ['ED']
    skin_depth = .05

    ##################################################
    ## Load or compute Carbonyl measurement metrics ##
    ##################################################
    global d
    d_keys=['max_s3','max_s2',\
            'max_rel_absorption','max_absorption','max_freq',\
            'charges0','charges_q0','Lambda0','enhancement',\
            'signals','norm_signals','DCSD_contact','DCSD_lift',\
            'psf_contact','psf_lift']

    demo_path = os.path.join(root_dir, demo_file)
    if load and os.path.isfile(demo_path):
        Logger.write('Loading lengths data...')
        d = pickle.load(open(demo_path, 'rb'))

    else:
        tip.LRM.load_params['reload_model'] = True
        tip.LRM.geometric_params['skin_depth'] = skin_depth

        d = {}

        for i, geometry in enumerate(geometries):
            for n, taper_angle in enumerate(taper_angles):

                #If geometry is not conical, do not iterate to additional taper angles, these are all the same.
                if geometry not in ['PtSi', 'hyperboloid', 'cone'] and n > 0:
                    break

                tip.LRM.geometric_params['geometry'] = geometry
                tip.LRM.geometric_params['taper_angle'] = taper_angle
                signals_for_geometry = {}

                for j, freq in enumerate(freqs):
                    signals_for_freq = dict([(key, []) for key in d_keys])

                    for k, L in enumerate(Ls):
                        tip.LRM.geometric_params['L'] = int(L)
                        Logger.write(
                            'Currently working on geometry "%s", freq=%s, L/a=%s...'
                            % (geometry, freq, L))

                        #Make sure to calculate on zs all the way up to zmax=2*amplitude+lift
                        carbonyl_vals=tip.LRM(carbonyl_freqs,rp=Carbonyl.reflection_p,Nqs=72,zmin=.1,a=a,amplitude=amplitude+lift/2.,\
                                              normalize_to=mat.Si.reflection_p,normalize_at=normalize_at_freq,load_freq=freq,Nzs=30)

                        ##Get overall signal with approach curve##
                        global carbonyl_sigs
                        carbonyl_sigs = carbonyl_vals['signals'] * numpy.exp(
                            -1j * numpy.angle(
                                carbonyl_vals['norm_signals'].cslice[0]))
                        carbonyl_rel_sigs = carbonyl_vals[
                            'signals'] / carbonyl_vals['norm_signals'].cslice[0]
                        ind = numpy.argmax(carbonyl_rel_sigs.imag.cslice[0]
                                           )  #Find maximum phase in contact

                        signals_for_freq['max_s3'].append(
                            carbonyl_vals['signal_3']
                            [ind])  #Pick out the peak frequency
                        signals_for_freq['max_s2'].append(
                            carbonyl_vals['signal_2']
                            [ind])  #Pick out the peak frequency
                        signals_for_freq['max_rel_absorption'].append(
                            carbonyl_rel_sigs[:, ind].imag.cslice[0]
                        )  #Relative to out-of-contact on reference
                        signals_for_freq['max_absorption'].append(
                            carbonyl_sigs[:, ind].imag.cslice[0]
                        )  #Absolute signal
                        signals_for_freq['max_freq'].append(
                            carbonyl_freqs[ind])
                        signals_for_freq['charges0'].append(
                            tip.LRM.charges0 /
                            (2 * numpy.pi * tip.LRM.charge_radii))
                        signals_for_freq['charges_q0'].append(
                            tip.LRM.charges.cslice[0] /
                            (2 * numpy.pi * tip.LRM.charge_radii))
                        signals_for_freq['Lambda0'].append(tip.LRM.Lambda0)
                        signals_for_freq['enhancement'].append(
                            numpy.abs(2 * tip.LRM.charges0[enhancement_index]))
                        signals_for_freq['signals'].append(
                            carbonyl_sigs[:,
                                          ind])  #Pick out the peak frequency
                        signals_for_freq['norm_signals'].append(
                            carbonyl_vals['norm_signals'])
                        signals_for_freq['DCSD_contact'].append(
                            get_DCSD(carbonyl_sigs[:, ind],
                                     zmin=0.1,
                                     amplitude=60,
                                     lift=0))
                        signals_for_freq['DCSD_lift'].append(
                            get_DCSD(carbonyl_sigs[:, ind],
                                     zmin=0.1,
                                     amplitude=60,
                                     lift=lift))

                        ##Isolate some point-spread functions##
                        rs = numpy.linspace(0, 5, 200).reshape((200, 1))
                        integrand_contact=AWA(tip.LRM.qxs**2*\
                                              special.j0(tip.LRM.qxs*rs)*tip.LRM.get_dipole_moments(tip.LRM.qxs)*\
                                              tip.LRM.wqxs,axes=[rs.squeeze(),tip.LRM.qxs])
                        integrand_contact=integrand_contact.interpolate_axis(numpy.logspace(numpy.log(tip.LRM.qxs.min())/numpy.log(10),\
                                                                                            numpy.log(tip.LRM.qxs.max())/numpy.log(10),1000),\
                                                                       axis=1)
                        psf_contact = numpy.sum(integrand_contact, axis=1)
                        signals_for_freq['psf_contact'].append(
                            AWA(psf_contact,
                                axes=[rs.squeeze()],
                                axis_names=['r/a']))

                        integrand_lift=AWA(tip.LRM.qxs**2*numpy.exp(-tip.LRM.qxs*lift/float(a))*\
                                           special.j0(tip.LRM.qxs*rs)*tip.LRM.get_dipole_moments(tip.LRM.qxs)*\
                                           tip.LRM.wqxs,axes=[rs.squeeze(),tip.LRM.qxs])
                        integrand_lift=integrand_lift.interpolate_axis(numpy.logspace(numpy.log(tip.LRM.qxs.min())/numpy.log(10),\
                                                                                      numpy.log(tip.LRM.qxs.max())/numpy.log(10),1000),\
                                                                       axis=1)
                        psf_lift = numpy.sum(integrand_lift, axis=1)
                        signals_for_freq['psf_lift'].append(
                            AWA(psf_lift,
                                axes=[rs.squeeze()],
                                axis_names=['r/a']))

                        progress = (i * len(Ls) * len(freqs) + j * len(Ls) + k + 1) \
                                   / float(len(Ls) * len(freqs) * len(geometries)) * 100
                        Logger.write('\tProgress: %1.1f%%' % progress)

                    for key in list(signals_for_freq.keys()):
                        if numpy.array(
                                signals_for_freq[key]
                        ).ndim == 2:  #Prepend probe length axis and expand these into AWA's
                            axes = [
                                a * Ls / float(wavelength),
                                signals_for_freq[key][0].axes[0]
                            ]
                            axis_names = [
                                r'$L/\lambda_\mathrm{C=O}$',
                                signals_for_freq[key][0].axis_names[0]
                            ]
                            signals_for_freq[key]=AWA(numpy.array(signals_for_freq[key],dtype=complex),\
                                                          axes=axes,axis_names=axis_names)
                        else:
                            signals_for_freq[key]=AWA(numpy.array(signals_for_freq[key],dtype=complex),\
                                                          axes=[a*Ls/float(wavelength)],\
                                                          axis_names=[r'$L/\lambda_\mathrm{C=O}$'])

                    signals_for_geometry[freq_labels[j]] = signals_for_freq

                geometry_name = geometry
                if geometry in ['PtSi', 'hyperboloid', 'cone']:
                    geometry_name += str(taper_angle)

                d[geometry_name] = signals_for_geometry

    if not load and save:
        Logger.write('Saving tip comparison data...')
        file = open(demo_path, 'wb')
        pickle.dump(d, file)
        file.close()

    return d
Example #18
from common.baseclasses import AWA
from common.numerical_recipes import QuickConvolver as QC
import matplotlib.pyplot as plt
import numpy as np

image=AWA(np.zeros((101,101)),axes=[np.linspace(-.5,.5,101)]*2)
image[50,50]=1

kernel_function=lambda x,y: 1/np.sqrt(x**2+y**2+1e-8**2)
qc1=QC(size=(1,1),shape=(101,101),pad_by=.5,kernel_function=kernel_function)
result1=qc1(image)
result1-=result1.min() #overall offset, while correct, should not be meaningful
result1[result1==result1.max()]=0 #point at center is controlled by 1e-8

kernel_function_fourier=lambda kx,ky: 2*np.pi/np.sqrt(kx**2+ky**2+1e-8**2)
qc2=QC(size=(1,1),shape=(101,101),pad_by=.5,kernel_function_fourier=kernel_function_fourier)
result2=qc2(image)
result2-=result2.min() #overall offset is controlled by 1e-8

plt.figure();result1.cslice[0].plot()
result2.cslice[0].plot()
plt.gca().set_xscale('symlog',linthresh=1e-2)
plt.show()
def PlotApproachCurves(zmax=30,materials={'Gold':(mat.Au,1000),\
                                          'Carbonyl':(mat.PMMA,1735),\
                                          r'$\mathrm{SiC}(\omega_{SO})$':(mat.SiC_6H_Ellips,945),\
                                          r'$\mathrm{SiC}(\omega_{-})$':(mat.SiC_6H_Ellips,920),\
                                          r'$\mathrm{SiC}(\omega_{+})$':(mat.SiC_6H_Ellips,970)},\
                       Nterms=15):
    
    ordered=['Gold','Carbonyl',
             r'$\mathrm{SiC}(\omega_{-})$',
             r'$\mathrm{SiC}(\omega_{SO})$',\
             r'$\mathrm{SiC}(\omega_{+})$']
    colors=['c','g',(1,0,0),(.7,0,.7),(0,0,1)]
    
    global beta,LRM_signals,EFM_signals,material_name
    zs=numpy.logspace(numpy.log(.1/30.)/numpy.log(10.),numpy.log(zmax)/numpy.log(10.),100)
    zs2=numpy.logspace(numpy.log(.1/30.)/numpy.log(10.),numpy.log(15)/numpy.log(10.),100)
    
    LRM_signals={}
    EFM_signals={}
    for i,(material_name,pair) in enumerate(materials.items()):
        
        material,freq=pair
        beta=material.reflection_p(freq,q=1/(30e-7))
        LRM_signals[material_name]=tip.LRM(freq,rp=beta,a=30,zs=zs*30,\
                                           Nqs=244,demodulate=False,normalize_to=None)['signals']
        LRM_signals[material_name].set_axes([zs])
        EFM_signals[material_name]=EigenfieldModel(beta,zs=zs2,Nterms=Nterms)
        
        if i==0: tip.LRM.load_params['reload_model']=False
        
    tip.LRM.load_params['reload_model']=True
        
    figure()
    ref_signal=LRM_signals['Gold'].cslice[zmax]
    
    subplot(121)
    for material_name,color in zip(ordered,colors):
        numpy.abs(LRM_signals[material_name]/ref_signal).plot(label=material_name,plotter=loglog,color=color)
        numpy.abs(EFM_signals[material_name]/ref_signal).plot(color=color,marker='o',ls='')
        
    ylabel('$|E_\mathrm{rad}|\,[\mathrm{a.u.}]$')
    xlabel('$d/a$')
    ylim(3e-1,3e1)
    grid()
    
    leg=legend(loc='lower left',fancybox=True,shadow=True)
    leg.get_frame().set_linewidth(.1)
    for t in leg.texts: t.set_fontsize(18)
        
    subplot(122)
    for material_name,color in zip(ordered,colors):
        p_LRM=AWA(numpy.unwrap(numpy.angle(LRM_signals[material_name]/ref_signal)))
        p_LRM.adopt_axes(LRM_signals[material_name])
        p_EFM=AWA(numpy.unwrap(numpy.angle(EFM_signals[material_name]/ref_signal)))
        p_EFM.adopt_axes(EFM_signals[material_name])
        
        (p_LRM/(2*numpy.pi)).plot(label=material_name,plotter=semilogx,color=color)
        (p_EFM/(2*numpy.pi)).plot(color=color,marker='o',ls='')
        
    ylabel(r'$\mathrm{arg}(E_\mathrm{rad})\,/\,2\pi$')
    xlabel('$d/a$')
    ylim(-.05,.5)
    grid()
    
    gcf().set_size_inches([12.5,7],forward=True)
    tight_layout()
    subplots_adjust(wspace=.3)
Example #20
run_test = True
global eigpairs
xs, ys = np.arange(101), np.arange(101)
L = xs.max()
xv, yv = np.meshgrid(xs, ys)
#eigpairs = load_eigpairs(basedir="../sample_eigenbasis_data")
eigpairs = {}

Nqs = 100
graphene_ribbon = True
if graphene_ribbon:

    q0 = np.pi / L  #This is for particle in box (allowed wavelength is n*2*L)
    for n in range(1, Nqs + 1):
        qx = n * q0
        pw = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=np.pi / 2),
                 axes=[xs, ys])  #cosine waves (particle-in-a-box modes)
        eigpairs[qx**2] = pw / np.sqrt(np.sum(pw**2))

else:

    q0 = 2 * np.pi / L  #This is for infinite sample
    for n in range(1, Nqs + 1):
        qx = n * q0
        pw = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=np.pi / 2),
                 axes=[xs, ys])  #cosine waves
        eigpairs[qx**2] = pw / np.sqrt(np.sum(pw**2))

        pw2 = AWA(planewave(qx, 0, xv, yv, x0=0, phi0=0),
                  axes=[xs, ys])  #sine waves, This is for infinite sample
        eigpairs[qx**2 + 1e-9] = pw2 / np.sqrt(np.sum(
            pw2**2))  #This is for infinite sample
 def invert_signal(self,signals,nodes=[(1,0)],Nterms=10,\
                   interpolation='linear',\
                   select_by='continuity',\
                   closest_pole=0,\
                   scaling=10):
     """The inversion is not unique, consequently the selected solution
     will probably be wrong if signal values correspond with 
     "beta" values that are too large (`|beta|~>min{|Poles|}`).
     This can be expected to break at around `|beta|>2`."""
     #Default is to invert signal in contact
     #~10 terms seem required to converge on e.g. SiO2 spectrum,
     #especially on the Re(beta)<0 (low signal) side of phonons
     
     global roots,poly,root_scaling
     
     #global betas,all_roots,pmin,rs,ps,As,Bs,roots,to_minimize
     if self.verbose:
         Logger.write('Inverting `signals` based on the provided `nodes` to obtain consistent beta values...')
     
     if not hasattr(signals,'__len__'): signals=[signals]
     if not isinstance(signals,AWA): signals=AWA(signals)
     
     zs,ws=list(zip(*nodes))
     ws_grid=numpy.array(ws).reshape((len(ws),1)) #last dimension is to broadcast over all `Nterms` equally
     zs=numpy.array(zs)
     
     Rs=self.Rs[:,:Nterms].interpolate_axis(zs,axis=0,kind=interpolation,
                                                    bounds_error=False,extrapolate=True)
     Ps=self.Ps[:,:Nterms].interpolate_axis(zs,axis=0,kind=interpolation,
                                                    bounds_error=False,extrapolate=True)
     
     #`rs` and `ps` can safely remain as arrays for `invres`
     rs=(Rs*ws_grid).flatten()
     ps=Ps.flatten()
     
     k0=numpy.sum(rs/ps).tolist()
     
     #Rescale units so their order of magnitude centers around 1
     rscaling=numpy.exp(-(numpy.log(numpy.abs(rs).max())+\
                         numpy.log(numpy.abs(rs).min()))/2.)
     pscaling=numpy.exp(-(numpy.log(numpy.abs(ps).max())+\
                          numpy.log(numpy.abs(ps).min()))/2.)
     root_scaling=1/pscaling
     #rscaling=1
     #pscaling=1
     if self.verbose:
         Logger.write('\tScaling residues by a factor %1.2e to reduce floating point overflow...'%rscaling)
         Logger.write('\tScaling poles by a factor %1.2e to reduce floating point overflow...'%pscaling)
     rs*=rscaling; ps*=pscaling
     k0*=rscaling/pscaling
     signals=signals*rscaling/pscaling
     
     #highest order first in `Ps` and `Qs`
     #VERY SLOW - about 100ms on practical inversions (~60 terms)
     As,Bs=invres(rs, ps, k=[k0], tol=1e-16, rtype='avg') #tol=1e-16 is the smallest allowable to `unique_roots`..
     
     dtype=numpy.complex128 #Double precision offers noticeable protection against overflow
     As=numpy.array(As,dtype=dtype)
     Bs=numpy.array(Bs,dtype=dtype)
     signals=signals.astype(dtype)
     
     #import time
     
     betas=[]
     for i,signal in enumerate(signals):
         #t1=time.time()
         
         #Root finding `roots` seems to give noisy results when `Bs` has degree >84, with dynamic range ~1e+/-30 in coefficients...
         #Pretty fast - 5-9 ms on practical inversions with rank ~60 companion matrices, <1 ms with ~36 terms
         #@TODO: Root finding chokes on `Nterms=9` (number of eigenfields) and `Nts=12` (number of nodes),
         #       necessary for truly converged S3 on resonant phonons, probably due to
         #       floating point overflow - leading term increases exponentially with
         #       number of terms, leading to huge dynamic range.
         #       Perhaps limited by the double precision of DGEEV.
         #       So, replace with faster / more reliable root finder?
         #       We need 1) speed, 2) ALL roots (or at least the first ~10 smallest)
         poly=As-signal*Bs
         roots=find_roots(poly,scaling=scaling)
         roots=roots[roots.imag>0]
         roots*=root_scaling #since all beta units scaled by `pscaling`, undo that here
         
         #print time.time()-t1
         
         #How should we select the most likely beta among the multiple solutions?
         #1. Avoids large changes in value of beta
         if select_by=='difference' and i>=1:
             if i==1 and self.verbose:
                 Logger.write('\tSelecting remaining roots by minimizing differences with prior...')
             to_minimize=numpy.abs(roots-betas[i-1])
             
         #2. Avoids large changes in slope of beta (best for spectroscopy)
         #Nearly guarantees good beta spectrum, with exception of very loosely sampled SiC spectrum
         #Loosely samples SiO2-magnitude phonons still perfectly fine
         elif select_by=='continuity' and i>=2:
             if i==2 and self.verbose:
                 Logger.write('\tSelecting remaining roots by ensuring continuity with prior...')
             earlier_diff=betas[i-1]-betas[i-2]
             current_diffs=roots-betas[i-1]
             to_minimize=numpy.abs(current_diffs-earlier_diff)
             
         #3. Select specifically which pole we want |beta| to be closest to
         else:
             if i==0 and self.verbose:
                 Logger.write('\tSeeding inversion closest to pole %i...'%closest_pole)
             reordering=numpy.argsort(numpy.abs(roots)) #Order the roots towards increasing beta
             roots=roots[reordering]
             to_minimize=numpy.abs(closest_pole-numpy.arange(len(roots)))
             
         beta=roots[to_minimize==to_minimize.min()].squeeze()
         betas.append(beta)
         if not i%5 and self.verbose:
             Logger.write('\tProgress: %1.2f%%  -  Inverted %i signals of %i.'%\
                                   (((i+1)/float(len(signals))*100),\
                                   (i+1),len(signals)))
     
     betas=AWA(betas); betas.adopt_axes(signals)
     betas=betas.squeeze()
     if not betas.ndim: betas=betas.tolist()
     
     return betas
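#Hedged numpy-only sketch of the inversion above: the node-weighted pole expansion gives
#signal(beta) = k0 + sum_i r_i/(beta - p_i) = A(beta)/B(beta), so candidate betas are the
#roots of the polynomial A(beta) - signal*B(beta); `invert_signal` then selects the
#physical root by continuity (or by proximity to a chosen pole).
import numpy
_rs=numpy.array([1.0,0.5]); _ps=numpy.array([-2.0,-5.0]); _k0=numpy.sum(_rs/_ps)
_beta_true=0.3+0.1j
_signal=_k0+numpy.sum(_rs/(_beta_true-_ps))

_B=numpy.poly(_ps)                                   #denominator (beta-p1)*(beta-p2)
_A=numpy.polyadd(_k0*_B,
                 numpy.polyadd(_rs[0]*numpy.poly([_ps[1]]),
                               _rs[1]*numpy.poly([_ps[0]])))
_candidates=numpy.roots(numpy.polysub(_A,_signal*_B))
print(_candidates)    #one of the two roots recovers beta_true ~ 0.3+0.1j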
Example #22
def PlotIdealProbePerformance(ideal_length=404,wavelength=6e3,lift=30,\
                              approach=False,compare_spectra=True):

    freq = 30 / wavelength
    tip.LRM.geometric_params['L'] = ideal_length

    if approach:
        gold=tip.LRM(1180,rp=mat.Au.reflection_p,a=30,amplitude=60,\
                     normalize_to=mat.Si.reflection_p,Nqs=72,normalize_at=1738,Nzs=30,\
                     load_freq=freq)
        p = numpy.exp(-1j * numpy.angle(gold['norm_signals'].cslice[120]))

        figure()
        approach = gold['sample_signal_v_time']
        approach *= p
        peak = abs(approach).max()
        approach /= peak

        approach += numpy.cos(2 * numpy.pi * tip.LRM.ts) * 10 + 50

        numpy.abs(approach).plot(label='Approach on $Au$')
        plot(-1 - tip.LRM.ts, numpy.abs(approach), color='b')
        plot(-1 + tip.LRM.ts, numpy.abs(approach), color='b')
        plot(-tip.LRM.ts, numpy.abs(approach), color='b')
        plot(1 - tip.LRM.ts, numpy.abs(approach), color='b')
        plot(1 + tip.LRM.ts, numpy.abs(approach), color='b')

        approximation = gold['sample_signal_0'] / 2. + gold[
            'sample_signal_1'] * numpy.cos(2 * numpy.pi * tip.LRM.ts)
        approximation *= p
        approximation = AWA(approximation, axes=[tip.LRM.ts])
        approximation /= peak

        numpy.abs(approximation).plot(label='1st Harmonic')
        plot(-1 - tip.LRM.ts, numpy.abs(approximation), color='g', ls='-')
        plot(-1 + tip.LRM.ts, numpy.abs(approximation), color='g', ls='-')
        plot(-tip.LRM.ts, numpy.abs(approximation), color='g', ls='-')
        plot(1 - tip.LRM.ts, numpy.abs(approximation), color='g', ls='-')
        plot(1 + tip.LRM.ts, numpy.abs(approximation), color='g', ls='-')

        xlabel('t/T')
        ylabel('Near-field Signal [a.u.]')
        leg = legend(loc='upper right', fancybox=True, shadow=True)
        for t in leg.texts:
            t.set_fontsize(18)
        leg.get_frame().set_linewidth(.1)
        grid()
        tight_layout()
        ylim(.35, 1.18)

    if compare_spectra:
        freqs = numpy.linspace(1100, 1900, 200)

        layer = mat.LayeredMedia((mat.PMMA, 100e-7), exit=mat.Si)
        PMMA=tip.LRM(freqs,rp=layer.reflection_p,a=30,amplitude=60+lift/2.,\
                     normalize_to=mat.Si.reflection_p,Nqs=72,normalize_at=1000,Nzs=30,\
                     load_freq=freq)
        figure()
        PMMA['signal_2'].imag.plot(color='b')
        ylabel(r'$\mathrm{Im}[\,S_2\,]\,[\mathrm{norm.}]$', color='b')
        tick_params(color='b', axis='y')
        yticks(color='b')
        xlabel(r'$\omega\,[cm^{-1}]$')

        gca().spines['left'].set_color('b')
        gca().spines['right'].set_color('r')

        p = numpy.angle(PMMA['norm_signals'].cslice[0])
        DCSD1 = get_DCSD(PMMA['signals']) * numpy.exp(-1j * p)
        DCSD2 = get_DCSD(PMMA['signals'], lift=lift) * numpy.exp(-1j * p)
        twinx()
        (DCSD1.imag / numpy.abs(DCSD2)).plot(color='r')
        ylabel(r'$\mathrm{DCSD}\,[\mathrm{norm.}]$', color='r', rotation=270)
        tick_params(color='r', axis='y')
        yticks(color='r')

        tight_layout()
        subplots_adjust(right=.85)
 def __init__(self,zs=numpy.logspace(-3,2,400),Nterms_max=20,sort=True,
             *args,**kwargs):
     """For some reason using Nqs>=244, getting higher q-resolution,
     only makes more terms relevant, requiring twice as many terms for
     stability and smoothness in approach curves...
     (although overall """
     
     self.zs=zs
     self.Nterms_max=Nterms_max
     
     #Take a look at current probe geometry
     #Adjust quadrature parameters that assist in yielding smooth residues/poles
     #Pre-determined as good values for Nterms=10
     geometry=tip.LRM.geometric_params['geometry']
     if self.verbose:
         Logger.write('Setting up eigenfield tip model for geometry "%s"...'\
                      %geometry)
      if geometry == 'cone': tip.LRM.quadrature_params['b']=.5
      elif geometry == 'hyperboloid': tip.LRM.quadrature_params['b']=.5
     
     global Rs,Ps
     Rs=[]
     Ps=[]
     for i,z in enumerate(zs):
         
         if i==0: kwargs['reload_signal']=True
         elif i==1: kwargs['reload_signal']=False
         
         d=get_tip_eigenbasis_expansion(z,a=1,*args,**kwargs)
         
         Rrow=d['Rs']
         Prow=d['Ps']
         Rrow[numpy.isnan(Rrow)]=Rrow[numpy.isfinite(Rrow)][-1]
         Prow[numpy.isnan(Prow)]=Prow[numpy.isfinite(Prow)][-1]
         
         Rrow=Rrow[Prow.real>0]
         Prow=Prow[Prow.real>0]
         
         where_unphys_Ps=(Prow.imag>0)
         Prow[where_unphys_Ps]-=1j*Prow[where_unphys_Ps].imag
         
         Prow=Prow[:50]
         Rrow=Rrow[:50]
         
         if i>1 and sort:
             #Ensure continuity with previous poles (could have done residues instead, but this works)
             sorting=numpy.array([numpy.argmin(
                                         numpy.abs((Prow-previous_P)-\
                                                   (previous_P-preprevious_P)))
                                  for previous_P,preprevious_P in zip(previous_Prow,preprevious_Prow)])
             Prow=Prow[sorting]
             Rrow=Rrow[sorting]
         
         Rs.append(Rrow[:Nterms_max])
         Ps.append(Prow[:Nterms_max])
         
          #Make sure to keep references to the previous two sets of poles and residues
          #(store the outgoing `previous` row as `preprevious` before overwriting it)
          if sort:
              if i>=1:
                  preprevious_Prow=previous_Prow
                  preprevious_Rrow=previous_Rrow
              previous_Prow=Prow
              previous_Rrow=Rrow
     
     terms=numpy.arange(Nterms_max)+1
     self.Rs=AWA(Rs,axes=[zs,terms],axis_names=['z/a','Term'])
     self.Ps=AWA(Ps,axes=[zs,terms],axis_names=['z/a','Term'])
     
     ##Remove `nan`s from poles and residues##
     #Best way to remove `nan`s (=huge values) is to replace with largest finite value
     #Largest such value in Rs will be found in ratio to that in Ps, implying that
     #beta in denominator of that term is irrelevant, so term+offset goes to zero anyway..
     
     for j in range(Nterms_max):
         
         Rrow=self.Rs[:,j]; Prow=self.Ps[:,j]
         Rrow[numpy.isnan(Rrow)]=Rrow[numpy.isfinite(Rrow)][-1] #Highest value will be for largest z (end of array)
         Prow[numpy.isnan(Prow)]=Prow[numpy.isfinite(Prow)][-1]
     
     ##Remove any positive imaginary part from poles##
     #These are unphysical and are perhaps a by-product of inaccurate eigenvalues
     #when diagonalizing an almost-singular matrix (i.e. g*Lambda)
     #Just put them on the real line, at least it doesn't seem to hurt anything, at most it's more physical.
     where_unphys_Ps=(self.Ps.imag>0)
     self.Ps[where_unphys_Ps]-=1j*self.Ps[where_unphys_Ps].imag
     
     if self.verbose: Logger.write('\tDone.')