def get_Erad(self,beta,zs=None,Nterms=None,interpolation='linear'):
     
     if hasattr(beta,'__len__'):
         if isinstance(beta,numpy.ndarray):
             if not beta.ndim: beta=beta.tolist()
         else: beta=numpy.array(beta)
     
     if isinstance(beta,numpy.ndarray): beta=beta.reshape((len(beta),1,1))
     
     if zs is None: zs=self.zs
     Rs=self.evaluate_residues(zs,Nterms)#,interpolation)
     Ps=self.evaluate_poles(zs,Nterms)#,interpolation)
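     #NOTE: the `interpolation` argument is accepted but not currently forwarded to the evaluators (left commented out above)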
     
     #Broadcasts over frequencies when `beta` carries an additional leading axis
     #The `+1/Ps` offset term is critical: it cancels a spurious z-dependence that would otherwise arise from the leading terms
     approach=numpy.sum(Rs*(1/(beta-Ps)+1/Ps),axis=-1)
     
     axes=[zs]; axis_names=['z/a']
     if hasattr(beta,'__len__'):
         approach=approach.transpose()
         if isinstance(beta,AWA):
             axes=axes+[beta.axes[0]]
             axis_names=axis_names+[beta.axis_names[0]]
         else:
             axes=axes+[None]
             axis_names=axis_names+[None]
     
     signals=AWA(approach,axes=axes,axis_names=axis_names).squeeze()
     if not signals.ndim: signals=signals.tolist()
     
     return signals
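
     # A minimal usage sketch (illustrative only; `model` and `betas` are hypothetical names --
     # an instance of this class and a frequency-resolved AWA of beta values, respectively):
     #
     #     zs = numpy.linspace(0, 2, 50)
     #     erad = model.get_Erad(betas, zs=zs, Nterms=10)
     #
     # When both axes are non-trivial, the result carries 'z/a' as its first axis and the
     # beta (frequency) axis second.
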
def get_signal_from_nodes(self,beta,nodes=[(0,1)],Nterms=None,interpolation='linear'):
     
     if not hasattr(beta,'__len__'): beta=[beta]
     if not isinstance(beta,AWA): beta=AWA(beta)
     
     #`Frequency` axis will be first
     if isinstance(beta,numpy.ndarray): beta=beta.reshape((len(beta),1,1))
     
     #Weights apply across z-values
     zs,ws=list(zip(*nodes))
     ws_grid=numpy.array(ws).reshape((1,len(ws),1))
     zs=numpy.array(zs)
     
     #Evaluate at all nodal points
     Rs=self.evaluate_residues(zs,Nterms,interpolation)
     Ps=self.evaluate_poles(zs,Nterms,interpolation)
     
     #Broadcasts over frequencies when `beta` carries an additional leading axis
     #The `+1/Ps` offset term is critical: it cancels a spurious z-dependence that would otherwise arise from the leading terms
     approach=numpy.sum(numpy.sum(Rs*(1/(beta-Ps)+1/Ps)*ws_grid,axis=-1),axis=-1)
     
     #Only the frequency axis remains after summing over the nodes; axes are adopted from `beta` below
     
     signals=AWA(approach); signals.adopt_axes(beta)
     signals=signals.squeeze()
     if not signals.ndim: signals=signals.tolist()
     
     return signals
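
     # A hedged usage sketch (illustrative only; `model` and `betas` are hypothetical names):
     #
     #     nodes = [(0.0, 0.5), (0.5, 0.3), (1.0, 0.2)]   #(z/a, weight) pairs
     #     sig = model.get_signal_from_nodes(betas, nodes=nodes, Nterms=10)
     #
     # Each node contributes the field at its z-value scaled by its weight, so the weighted sum
     # can serve as a quadrature over z; the default `[(0,1)]` simply evaluates in contact with
     # unit weight.
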
def invert_signal(self,signals,nodes=[(0,1)],Nterms=10,\
                  interpolation='linear',\
                  select_by='continuity',\
                  closest_pole=0,\
                  scaling=10):
     """The inversion is not unique; consequently, the selected solution
     will likely be wrong if the signal values correspond to
     `beta` values that are too large (roughly `|beta| > min{|Poles|}`).
     In practice this can be expected to break down around `|beta|>2`."""
     #Default is to invert signal in contact
     #~10 terms seem required to converge on e.g. SiO2 spectrum,
     #especially on the Re(beta)<0 (low signal) side of phonons
     
     global roots,poly,root_scaling
     
     #global betas,all_roots,pmin,rs,ps,As,Bs,roots,to_minimize
     if self.verbose:
         Logger.write('Inverting `signals` based on the provided `nodes` to obtain consistent beta values...')
     
     if not hasattr(signals,'__len__'): signals=[signals]
     if not isinstance(signals,AWA): signals=AWA(signals)
     
     zs,ws=list(zip(*nodes))
     ws_grid=numpy.array(ws).reshape((len(ws),1)) #last dimension is to broadcast over all `Nterms` equally
     zs=numpy.array(zs)
     
     Rs=self.Rs[:,:Nterms].interpolate_axis(zs,axis=0,kind=interpolation,
                                                    bounds_error=False,extrapolate=True)
     Ps=self.Ps[:,:Nterms].interpolate_axis(zs,axis=0,kind=interpolation,
                                                    bounds_error=False,extrapolate=True)
     
     #`rs` and `ps` can safely remain as arrays for `invres`
     rs=(Rs*ws_grid).flatten()
     ps=Ps.flatten()
     
     k0=numpy.sum(rs/ps).tolist()
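     #(`k0` is the direct (constant) term of the partial-fraction form; it makes the modeled
     # signal vanish at beta=0, consistent with the `+1/Ps` offset in the forward calculations)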
     
     #Rescale units so their order of magnitude centers around 1
     rscaling=numpy.exp(-(numpy.log(numpy.abs(rs).max())+\
                         numpy.log(numpy.abs(rs).min()))/2.)
     pscaling=numpy.exp(-(numpy.log(numpy.abs(ps).max())+\
                          numpy.log(numpy.abs(ps).min()))/2.)
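     #(each scaling factor is the inverse geometric mean of the corresponding largest and
     # smallest magnitudes, so the rescaled values straddle unity)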
     root_scaling=1/pscaling
     #rscaling=1
     #pscaling=1
     if self.verbose:
         Logger.write('\tScaling residues by a factor %1.2e to reduce floating point overflow...'%rscaling)
         Logger.write('\tScaling poles by a factor %1.2e to reduce floating point overflow...'%pscaling)
     rs*=rscaling; ps*=pscaling
     k0*=rscaling/pscaling
     signals=signals*rscaling/pscaling
     
     #highest order first in `As` and `Bs`
     #VERY SLOW - about 100ms on practical inversions (~60 terms)
     As,Bs=invres(rs, ps, k=[k0], tol=1e-16, rtype='avg') #tol=1e-16 is the smallest allowable to `unique_roots`..
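     #`As`/`Bs` are the numerator/denominator polynomial coefficients of sum(rs/(x-ps))+k0,
     # so each candidate beta below is a root of As(beta)-signal*Bs(beta)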
     
     dtype=numpy.complex128 #Double precision offers noticeable protection against overflow
     As=numpy.array(As,dtype=dtype)
     Bs=numpy.array(Bs,dtype=dtype)
     signals=signals.astype(dtype)
     
     #import time
     
     betas=[]
     for i,signal in enumerate(signals):
         #t1=time.time()
         
         #Root finding `roots` seems to give noisy results when `Bs` has degree >84, with dynamic range ~1e+/-30 in coefficients...
         #Pretty fast - 5-9 ms on practical inversions with rank ~60 companion matrices, <1 ms with ~36 terms
         #@TODO: Root finding chokes on `Nterms=9` (number of eigenfields) and `Nts=12` (number of nodes),
         #       necessary for truly converged S3 on resonant phonons, probably due to
         #       floating point overflow - leading term increases exponentially with
         #       number of terms, leading to huge dynamic range.
         #       Perhaps limited by the double precision of DGEEV.
         #       So, replace with faster / more reliable root finder?
         #       We need 1) speed, 2) ALL roots (or at least the first ~10 smallest)
         poly=As-signal*Bs
         roots=find_roots(poly,scaling=scaling)
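         #Retain only upper-half-plane roots (Im(beta)>0), presumably the physically admissible branch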
         roots=roots[roots.imag>0]
         roots*=root_scaling #since all beta units scaled by `pscaling`, undo that here
         
         #print time.time()-t1
         
         #How should we select the most likely beta among the multiple solutions?
         #1. Avoids large changes in value of beta
         if select_by=='difference' and i>=1:
             if i==1 and self.verbose:
                 Logger.write('\tSelecting remaining roots by minimizing differences with prior...')
             to_minimize=numpy.abs(roots-betas[i-1])
             
         #2. Avoids large changes in the slope of beta (best for spectroscopy)
         #Nearly guarantees a good beta spectrum, except for very loosely sampled SiC spectra;
         #loosely sampled SiO2-magnitude phonons are still perfectly fine
         elif select_by=='continuity' and i>=2:
             if i==2 and self.verbose:
                 Logger.write('\tSelecting remaining roots by ensuring continuity with prior...')
             earlier_diff=betas[i-1]-betas[i-2]
             current_diffs=roots-betas[i-1]
             to_minimize=numpy.abs(current_diffs-earlier_diff)
             
         #3. Select specifically which pole we want |beta| to be closest to
         else:
             if i==0 and self.verbose:
                 Logger.write('\tSeeding inversion closest to pole %i...'%closest_pole)
             reordering=numpy.argsort(numpy.abs(roots)) #Order the roots by increasing |beta|
             roots=roots[reordering]
             to_minimize=numpy.abs(closest_pole-numpy.arange(len(roots)))
             
         beta=roots[to_minimize==to_minimize.min()].squeeze()
         betas.append(beta)
         if not i%5 and self.verbose:
             Logger.write('\tProgress: %1.2f%%  -  Inverted %i signals of %i.'%\
                                  (((i+1)/float(len(signals))*100),\
                                   (i+1),len(signals)))
     
     betas=AWA(betas); betas.adopt_axes(signals)
     betas=betas.squeeze()
     if not betas.ndim: betas=betas.tolist()
     
     return betas
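
     # A hedged round-trip sketch (illustrative only; `model` and `beta_spectrum` are hypothetical names):
     #
     #     nodes = [(0,1)]                                   #default: signal in contact
     #     sig = model.get_signal_from_nodes(beta_spectrum, nodes=nodes, Nterms=10)
     #     recovered = model.invert_signal(sig, nodes=nodes, Nterms=10, select_by='continuity')
     #
     # Per the docstring above, recovery is reliable only while |beta| stays well below the
     # smallest pole magnitude (roughly |beta|<2).
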