Example 1
    def show(self, image):
        pylab.clf()
        zmin = -2.0 * pylab.rms_flat(image)
        zmax = 5.0 * pylab.rms_flat(image)
        # pylab.imshow(image, interpolation='bilinear', origin='lower', cmap=pylab.cm.Greys, vmin=zmin, vmax=zmax)
        pylab.imshow(image, interpolation='bilinear', origin='lower',
                     cmap=pylab.cm.RdYlBu_r, vmin=zmin, vmax=zmax)
        pylab.colorbar()
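
All of these examples rely on pylab.rms_flat, which returns the root mean square of all elements of an array, flattened out. It was deprecated and later removed from matplotlib, so a minimal drop-in sketch with numpy is:

import numpy as np

def rms_flat(a):
    # root mean square of all the elements of a, flattened out
    return np.sqrt(np.mean(np.abs(a) ** 2))

In show above, the [-2*rms, 5*rms] display range ties the color stretch to the noise level of the image.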
Example 2
    def ishow(self, plane=0):  # show current image
        pylab.clf()
        image = self.b[:, :, plane, 0]
        zmin = -2.0 * pylab.rms_flat(image)
        zmax = 5.0 * pylab.rms_flat(image)
        pylab.imshow(image, interpolation='bilinear', origin='lower',
                     cmap=pylab.cm.Greys, vmin=zmin, vmax=zmax)
        pylab.colorbar()
Example 3
 def CloudsFromImage(self, powerSpec=None, randomSeed=None, normfile=None):
     """Compute clouds based on frequency analysis of an IR image"""
     ## for this temp version, sampling at 240 is imperative (interpolation was too slow and was removed for now)
     # get the power spectrum from an IR image - for the normalization for gray extinction, see notes
     ## reference here !!
     # get 2D random Gaussian noise with rms = 1
     if powerSpec is None:
         powerSpec = self.powerSpec
     if randomSeed is not None:
         numpy.random.seed(randomSeed)
     noise2D = numpy.random.normal(
         numpy.zeros(self.sampling * self.sampling),
         1.).reshape(self.sampling, self.sampling)
     # a realization is obtained in Fourier space by computing fft(noise) * sqrt(powerSpec)
     fourierclouds = fftpack.fft2(noise2D) * numpy.sqrt(powerSpec)
     # then, inverse Fourier transform to get clouds
     self.clouds = numpy.real(fftpack.ifft2(fourierclouds))
     # get 0 abs as minimum alteration of image
     mins = []
     maxs = []
     for i in range(self.sampling):
         mins = numpy.append(mins, min(self.clouds[i, :]))
         maxs = numpy.append(maxs, max(self.clouds[i, :]))
     self.clouds = self.clouds - min(mins)
     ## correct for normalizations
     if normfile is None:
         normfile = os.path.join(os.getenv('ATMOSPHERE_CLOUDS_DIR'),
                                 'data/1104-batch1_im.txt')
         print(normfile)
     init_im = numpy.loadtxt(normfile)
     self.clouds = self.clouds / rms_flat(self.clouds) * rms_flat(init_im)
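
The heart of CloudsFromImage is the standard recipe for realizing a random field with a prescribed power spectrum: multiply the FFT of unit-variance white noise by the square root of the spectrum and transform back. A self-contained sketch of just that step (the grid size and the flat test spectrum below are illustrative stand-ins):

import numpy as np
from scipy import fftpack

n = 240                                     # grid sampling (this version assumes 240)
power_spec = np.ones((n, n))                # stand-in for a spectrum measured from an IR image
noise = np.random.normal(0.0, 1.0, (n, n))  # white Gaussian noise with rms = 1
clouds = np.real(fftpack.ifft2(fftpack.fft2(noise) * np.sqrt(power_spec)))
clouds -= clouds.min()                      # shift so zero is the minimum alteration, as above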
Example 4
    def disp(self,
             image,
             plane=None,
             cube=None,
             xlabel='',
             ylabel=''):  #cube arg applies to image cube of shape (:,:,1:,n)
        #cube arg specifies nth slice of cube
        if plane is not None and cube is not None:  # choose both plane and cube, if ever applicable
            image = image[:, :, plane, cube]
        elif plane is not None:  # select plane of image
            image = image[:, :, plane, 0]
        elif cube is not None:  # select section of image cube
            image = image[:, :, 0, cube]
        zmin = -2.0 * pylab.rms_flat(image)
        zmax = 5.0 * pylab.rms_flat(image)

        pylab.clf()
        pylab.xlabel(xlabel)
        pylab.ylabel(ylabel)
        pylab.imshow(image,
                     interpolation='bilinear',
                     origin='lower',
                     cmap=pylab.cm.Greys,
                     vmin=zmin,
                     vmax=zmax)
Example 5
    def rshow(self):  # show current residual
        pylab.clf()
        image = self.resid
        zmin = -2.0 * pylab.rms_flat(image)
        zmax = 5.0 * pylab.rms_flat(image)
        pylab.imshow(image, interpolation='bilinear', origin='lower',
                     cmap=pylab.cm.Greys, vmin=zmin, vmax=zmax)
        pylab.colorbar()
Example 6
def evaluate_model(mod, comment='', data_fname='missing_noisy_data.csv', truth_fname='data.csv'):
    """ Run specified model on existing data (data.csv / missing_noisy_data.csv) and save results in dev_log.csv
    Existing models: %s """ % data_run_models
    if mod not in data_run_models.split(' '):
        raise TypeError('Unrecognized model "%s"; must be one of %s' % (mod, data_run_models))

    import model
    reload(model)

    print('loading data')
    data = pl.csv2rec(data_fname)
    truth = pl.csv2rec(truth_fname)
    
    t0 = time.time()
    print('generating model')
    mod_mc = eval('model.%s(data)' % mod)

    print('fitting model with mcmc')
    mod_mc.sample(10000, 5000, 50, verbose=1)
    t1 = time.time()

    print('summarizing results')

    import graphics
    reload(graphics)
    pl.figure(figsize=(22, 17), dpi=300)
    pl.clf()
    graphics.plot_all_predictions_over_time(data, mod_mc.predicted, more_data=truth)

    data_stats = mod_mc.data_predicted.stats()
    i_out = [i for i in range(len(data)) if pl.isnan(data.y[i])]
    rmse_abs_out = pl.rms_flat(truth.y[i_out] - data_stats['mean'][i_out])
    rmse_rel_out = 100*pl.rms_flat(1. - data_stats['mean'][i_out]/truth.y[i_out])

    i_in = [i for i in range(len(data)) if not pl.isnan(data.y[i])]
    rmse_abs_in = pl.rms_flat(truth.y[i_in] - data_stats['mean'][i_in])
    rmse_rel_in = 100*pl.rms_flat(1. - data_stats['mean'][i_in]/truth.y[i_in])

    param_stats = mod_mc.param_predicted.stats()
    coverage = 100*pl.sum((truth.y[i_out] >= param_stats['95% HPD interval'][i_out, 0]) & (truth.y[i_out] <= param_stats['95% HPD interval'][i_out, 1])) / float(len(i_out))

    import md5
    data_hash = md5.md5(data).hexdigest()
    results = [mod, t1-t0, rmse_abs_out, rmse_rel_out, rmse_abs_in, rmse_rel_in, coverage,
               len(data), len(pl.unique(data.region)), len(pl.unique(data.country)), len(pl.unique(data.year)), len(pl.unique(data.age)), data_hash,
               t0, comment]
    print('%s: time: %.0fs out-of-samp rmse abs=%.1f rel=%.0f in-samp rmse abs=%.1f rel=%.0f coverage=%.0f\ndata: %d rows; %d regions, %d countries %d years %d ages [data hash: %s]\n(run conducted at %f)\n%s' % tuple(results))

    pl.savefig('/home/j/Project/Models/space-time-smoothing/images/%s.png' % t0)  # FIXME: don't hardcode path for saving images

    import csv
    f = open('dev_log.csv', 'a')
    f_csv = csv.writer(f)
    f_csv.writerow(results)
    f.close()

    return mod_mc
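
Note how rms_flat doubles as an RMSE here: the root mean square of the residual vector truth - prediction is exactly the root-mean-square error, computed separately for held-out (out-of-sample) and observed (in-sample) rows. A toy check with invented numbers:

import numpy as np

truth_y = np.array([10.0, 20.0, 30.0])
pred_mean = np.array([11.0, 18.0, 33.0])
rmse_abs = np.sqrt(np.mean((truth_y - pred_mean) ** 2))              # same as pl.rms_flat(truth_y - pred_mean)
rmse_rel = 100 * np.sqrt(np.mean((1.0 - pred_mean / truth_y) ** 2))  # relative version, in percent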
Example 7
    def subtract(self, plane=0):
        resid = []
        b = self.b[:, :, plane, 0]
        m = self.m[:, :, 0]
        s = numarray.shape(b)
        for x in range(s[0]):
            resid.append([])
            for y in range(s[1]):
                resid[x].append(0.0)  # note the floating point: very important!
        self.resid = pylab.array(resid)
        for x in range(s[0]):
            for y in range(s[1]):
                self.resid[x, y] = b[x, y] - m[x, y]
        self.show(self.resid)
        rms = pylab.rms_flat(self.resid)
        min1, max1 = self.min_max(self.resid)
        self.show(self.resid)
        print('rms of residual image: %f' % rms)
        # write to web page
        if self.write:
            header = 'Residual from plane %d of image %s' % (plane, self.imageName)
            body1 = ['<pre>The image generated with pylab:</pre>']
            body2 = ['<pre>maximum: %f</pre>' % max1,
                     '<pre>minimum: %f</pre>' % min1,
                     '<pre>rms: %f</pre>' % rms]
            saveDir = self.imDir + self.fname[11:-5] + '-resid%d.png' % plane
            pylab.savefig(saveDir)
            self.htmlPub.doBlk(body1, body2, saveDir, header)
        return rms
Example 8
def balance_signal(sig, balance_type="maxabs"):
    """
    ::

        Perform signal balancing using:
          rms - root mean square
          max - maximum value
          maxabs - maximum absolute value [default]
          norm - Euclidean norm
          none - do nothing

        Returns:
          sig - balanced (normalized) signal
    """
    balance_types = ['rms', 'max', 'maxabs', 'norm', 'none']
    if balance_type == balance_types[0]:
        return sig / pylab.rms_flat(sig)
    if balance_type == balance_types[1]:
        return sig / sig.max()
    if balance_type == balance_types[2]:
        return sig / abs(sig).max()
    if balance_type == balance_types[3]:
        return sig / pylab.norm_flat(sig)
    if balance_type == balance_types[4]:
        return sig
    raise TestSignalError(
        "signal balancing type not supported: %s" % balance_type)
Example 9
    def simple_stats(self, sigma=10, plane=0):
        nchan = self.imTool.shape()[3]
        rmsmax = 0
        rms1 = 0
        chan = 0

        for k in range(0, nchan // 2 + 1):
            rms1 = pylab.rms_flat(self.b[:, :, plane, k])
            if rms1 > rmsmax:
                rmsmax = rms1
                chan = k

        rms1 = rmsmax
        min1, max1 = self.min_max(self.b[:, :, plane, chan])
        self.show(self.b[:, :, plane, chan])
        if self.write:
            header = 'Channel %d pol %d from image %s' % (chan, plane, self.imageName)
            body1 = ['The image generated with pylab:']
            body2 = ['maximum: %f' % max1, 'minimum: %f' % min1, 'rms: %f' % rms1]
            listnam = self.imTool.name(strippath=False).split('/')
            imnam = listnam[-2] + '_' + listnam[-1]
            saveDir = self.imDir + imnam + '-channel%d-pol%d.png' % (chan, plane)
            pylab.savefig(saveDir)
            self.htmlPub.doBlk(body1, body2, saveDir, header)
        returnFlag = 1
        if rms1 > 2 * sigma:
            returnFlag = -1
        return rms1, max1, min1, returnFlag
Example 10
    def auto_fitCube(self, image, verbose=0):
        x, y = [], []
        xlist, ylist = [], []
        max_rms = 0
        xp, yp = 0, 0
        diff = 0
        max_diff = 0
        s = numarray.shape(image)
        if s[0] > 100:
            #      xlist=range(.25*s[0],.75*s[0])
            xlist = range(s[0] // 4, 3 * s[0] // 4)
        else:
            xlist = range(s[0])
        if s[1] > 100:
            #      ylist=range(.25*s[1],.75*s[1],4) #later add the 4th pixel thing
            ylist = range(s[1] // 4, 3 * s[1] // 4, 4)
        else:
            ylist = range(s[1])
        for i in xlist:
            for j in ylist:
                x, y = self.drill(image, i, j)
                rms = pylab.rms_flat(y)
                diff = abs(pylab.max(y) - rms)
                if (diff > max_diff):
                    max_diff, xp, yp = diff, i, j
#      if rms>max_rms:
#          max_rms,xp,yp=rms,i,j
#          if verbose: print i,j,rms
        print('optimum fit to [%d,%d]   diff=%.6f' % (xp, yp, max_diff))
        XY, fwhm = self.fitCube2(xp, yp)
        return XY, fwhm
Example 11
def noise(params=None, num_points=44100, filtered=True, modulated=True, noise_fun=pylab.rand):
    """
    ::

        Generate noise according to params dict
            params - parameter dict containing sr, and num_harmonics elements [None=default params]
            num_points - how many samples to generate [44100]
            filtered - set to True for filtered noise sequence [True]
            modulated - set to True for modulated noise sequence [True]
            noise_fun - the noise generating function [pylab.rand]
    """
    if params is None:
        params = default_noise_params()
    noise_dB = params['noise_dB']
    num_harmonics = params['num_harmonics']
    cf = params['cf']
    bw = params['bw']
    sr = params['sr']
    g = 10**(noise_dB/20.0)*noise_fun(num_points)
    if filtered or modulated:
        [b,a] = scipy.signal.filter_design.butter(4, bw*2*pylab.pi/sr, btype='low', analog=0, output='ba')
        g = scipy.signal.lfilter(b, a, g)

    if not modulated:
        # Additive noise
        s = harmonics(params, f0=cf, num_points=num_points)
        x = s + g
    else:
        # Phase modulation with *filtered* noise (side-band modulation should be narrow-band at bw)
        x = pylab.zeros(num_points)
        for i in pylab.arange(1,num_harmonics+1):
            x += pylab.exp(-0.5*i) * pylab.sin( (2.0*pylab.pi*cf*i / sr) * pylab.arange(num_points) + g)
    x /= pylab.rms_flat(x)
    return x
Example 12
    def findPeaks(self, y, x, plane=0):  #note inversion in x,y coord
        #use like findPeaks(x,y)
        #inversion should be self-consistent if take output as [x,y]
        r = 50  #search 'radius'
        rms = pylab.rms_flat(self.b[:, :, plane, 0])
        thold = rms  #limit of pixels to be considered

        val = []
        xp = []
        yp = []
        avoid = []
        data = []
        if x is None:
            x = [len(self.b[:, 0, plane, 0]) / 2]
            y = [len(self.b[0, :, plane, 0]) / 2]
            r = min(x[0], y[0]) - 2
        for i in range(len(x)):  #len(x)==len(y)!
            v = 0
            max = 0
            x_p = 0
            y_p = 0
            for j in range(x[i] - r, x[i] + r):
                for k in range(y[i] - r, y[i] + r):
                    if [j, k] not in avoid:
                        v = self.b[j, k, plane, 0]
                        if v > max:
                            max = v
                            x_p = j
                            y_p = k
            val.append(max)
            xp.append(x_p)
            yp.append(y_p)
            avoid.append([x_p, y_p])  #ignores previous bright pixels
            #generate data sets
        for i in range(len(x)):
            vv = val[i]
            xx = xp[i]
            yy = yp[i]
            while vv > thold:
                xx += 1
                vv = self.b[xx, yp[i], plane, 0]
            vv = val[i]
            while vv > thold:
                yy += 1
                vv = self.b[xp[i], yy, plane, 0]
            dx = xx - xp[i]
            dy = yy - yp[i]
            r = (dx + dy) / 2
            if (r > 12):
                r = 12
            print('PEAK pos, val and r ', xp[i], yp[i], val[i], r)
            data.append([])
            for j in range(xp[i] - r, xp[i] + r):
                for k in range(yp[i] - r, yp[i] + r):
                    if (pylab.sqrt((xp[i] - j) ** 2 + (yp[i] - k) ** 2) <= r
                            and self.b[j, k, plane, 0] > 0):
                        data[i].append(([j, k], self.b[j, k, plane, 0]))
        return data, xp, yp
Example 13
def harmonics(params=None, f0=441.0, afun=lambda x: pylab.exp(-0.5*x), num_points=44100, phase_offset=0):
    """
    ::

        Generate a harmonic series using a harmonic weighting function
         params - parameter dict containing sr, and num_harmonics elements
         afun   - a lambda function of one parameter (harmonic index) returning a weight
         num_points - how many samples to generate [44100]
         phase_offset - initial phase of the harmonic series
    """
    if params is None:
        params = default_signal_params()
    f0 = float(f0)
    sr = float(params['sr'])
    num_harmonics  = params['num_harmonics']
    x = pylab.zeros(num_points)
    for i in pylab.arange(1, num_harmonics+1):    
        x +=  afun(i) * sinusoid(params, f0=i*f0, num_points=num_points, phase_offset=i*phase_offset)
    x /= pylab.rms_flat(x)
    return x
Example 14
def shepard(params=None, f0=55, num_octaves=7, num_points=44100, phase_offset=0, center_freq=440, band_width=150):
    """
    ::

        Generate shepard tones
             params - parameter dict containing sr, and num_harmonics elements
             f0 - base frequency in Hertz of shepard tone [55]
             num_octaves - number of sinusoidal octave bands to generate [7]
             num_points - how many samples to generate [44100]
             phase_offset - initial phase offset for shepard tone
             center_freq - where the peak of the spectrum will be
             band_width - how wide a spectral band to use for shepard tones
    """
    if params is None:
        params = default_signal_params()
    x = pylab.zeros(num_points)
    shepard_weight = gauss_pdf(20000, center_freq, band_width)
    for i in pylab.arange(0, num_octaves):
        a = shepard_weight[int(round(f0*2**i))]
        x += a * harmonics(params, f0=f0*2**i, num_points=num_points, phase_offset=phase_offset)
    x /= pylab.rms_flat(x)
    return x
Example 15
def qimplot(image=None, rmin=-2, rmax=2, cmap='gray'):
    """
    qimplot: Quick Image Plot
    Plots image in grayscale. Colorscale is from rmin*RMS to rmax*RMS.
    Defaults:
        rmin = -2
        rmax = +2
        cmap = 'gray'
            Can be any of the usual cmap values, e.g. 'YlOrRd' or 'jet'
    """
    logger = logging.getLogger('QIMPLOT')
    logger.info("Quick Image Plot")
    if image is None:
        logger.critical("Please provide input image!")
        sys.exit(0)
    pl.figure(figsize=(10, 10))
    fits = miriad('fits')
    if not os.path.exists(image):
        logger.critical(image + " not found!")
        sys.exit(0)
    fits.in_ = image
    fits.out = image + '.fits'
    fits.op = 'xyout'
    fits.go(rmfiles=True)
    imheader = pyfits.open(image + '.fits')
    imdata = imheader[0].data
    rms = pl.rms_flat(imdata[0, 0, :, :])
    logger.info('RMS = ' + "{:2.2}".format(rms))
    logger.info("Plotting from " + str(rmin) + "*RMS to " + str(rmax) +
                str("*RMS"))
    pl.imshow(pl.flipud(imdata[0, 0, :, :]),
              cmap=cmap,
              vmin=rmin * rms,
              vmax=rmax * rms)
    pl.colorbar()
    pl.xticks(())
    pl.yticks(())
Example 16
def devils_staircase(params, f0=441, num_octaves=7, num_steps=12, step_size=1, hop=4096, 
                     overlap=True, center_freq=440, band_width=150):
    """
    ::

        Generate an auditory illusion of an infinitely ascending/descending sequence of shepard tones
            params - parameter dict containing sr, and num_harmonics elements
            f0 - base frequency in Hertz of shepard tone [55]
            num_octaves - number of sinusoidal octave bands to generate [7]
            num_steps - how many steps to take in the staircase
            step_size - semitone change per step, can be fractional [1.]
            hop - how many points to generate per step
            overlap - whether the end-points should be cross-faded for overlap-add
            center_freq - where the peak of the spectrum will be
            band_width - how wide a spectral band to use for shepard tones
    """
    sr = params['sr']
    norm_freq = 2*pylab.pi/sr
    wlen = min([hop/2, 2048])
    print(wlen)
    x = pylab.zeros(num_steps*hop+wlen)
    h = scipy.signal.hanning(wlen*2)
    # overlap add    
    phase_offset=0
    for i in pylab.arange(num_steps):
        freq = f0*2**(((i*step_size)%12)/12.0)        
        s = shepard(params, f0=freq, num_octaves=num_octaves, num_points=hop+wlen, 
                    phase_offset=0, center_freq=center_freq, band_width=band_width)
        s[0:wlen] *= h[0:wlen]
        s[hop:hop+wlen] *= h[wlen:wlen*2]
        x[i*hop:(i+1)*hop+wlen] += s
        phase_offset += hop*freq*norm_freq
    if not overlap:
        x = pylab.resize(x, num_steps*hop)
    x /= pylab.rms_flat(x)
    return x
Example 17
def xcorr(x, y=None, maxlags=None, norm='biased'):
    """Cross-correlation using numpy.correlate
    
    Estimates the cross-correlation (and autocorrelation) sequence of a random
    process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N-1.
    
    :param array x: first data array of length N
    :param array y: second data array of length N. If not specified, computes the 
        autocorrelation. 
    :param int maxlags: compute cross correlation between [-maxlags:maxlags]
        when maxlags is not specified, the range of lags is [-N+1:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
     
    The true cross-correlation sequence is
    
    .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

    However, in practice, only a finite segment of one realization of the 
    infinite-length random process is available.
    
    The correlation is estimated using numpy.correlate(x,y,'full'). 
    Normalisation is handled by this function using the following cases:

        * 'biased': Biased estimate of the cross-correlation function
        * 'unbiased': Unbiased estimate of the cross-correlation function
        * 'coeff': Normalizes the sequence so the autocorrelations at zero 
           lag is 1.0.

    :return:
        * a numpy.array containing the cross-correlation sequence (length 2*N-1)
        * lags vector
        
    .. note:: If x and y are not the same length, the shorter vector is 
        zero-padded to the length of the longer vector.
               
    .. rubric:: Examples
    
    .. doctest::
    
        >>> from spectrum import *
        >>> x = [1,2,3,4,5]
        >>> c, l = xcorr(x,x, maxlags=0, norm='biased')
        >>> c
        array([ 11.])
    
    .. seealso:: :func:`CORRELATION`.  
    """
    N = len(x)
    if y is None:
        y = x
    assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'

    if maxlags is None:
        maxlags = N - 1
        lags = arange(0, 2 * N - 1)
    else:
        assert maxlags < N, 'maxlags must be less than data length'
        lags = arange(N - maxlags - 1, N + maxlags)
              
    res = numpy.correlate(x, y, mode='full')
    
    if norm == 'biased':
        Nf = float(N)
        res = res[lags] / float(N)    # do not use /= !! 
    elif norm == 'unbiased':
        res = res[lags] / (float(N)-abs(arange(-N+1, N)))[lags]
    elif norm == 'coeff':        
        Nf = float(N)
        rms = rms_flat(x) * rms_flat(y)
        res = res[lags] / rms / Nf
    else:
        res = res[lags]

    lags = arange(-maxlags, maxlags+1)        
    return res, lags
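
With norm='coeff' the zero-lag autocorrelation is exactly 1.0: the raw zero-lag value sum(x*x) is divided by rms_flat(x)**2 * N, which is the same quantity. A quick check alongside the docstring's example:

import numpy
x = numpy.array([1., 2., 3., 4., 5.])
c, lags = xcorr(x, x, maxlags=0, norm='coeff')
print(c)   # -> [ 1.]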
Example 18
def CORRELATION(x, y=None, maxlags=None, norm='unbiased'):
    r"""Correlation function

    This function should give the same results as :func:`xcorr` but it 
    returns the positive lags only. Moreover the algorithm does not use
    FFT as compared to other algorithms. 
     
    :param array x: first data array of length N
    :param array y: second data array of length N. If not specified, computes the 
        autocorrelation. 
    :param int maxlags: compute cross correlation between [0:maxlags]
        when maxlags is not specified, the range of lags is [0:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
     
        * *biased*   correlation=raw/N, 
        * *unbiased* correlation=raw/(N-`|lag|`)
        * *coeff*    correlation=raw/(rms(x).rms(y))/N
        * None       correlation=raw

    :return: 
        * a numpy.array correlation sequence,  r[1,N]
        * a float for the zero-lag correlation,  r[0]
    
    The *unbiased* correlation has the form:
    
    .. math::

        \hat{r}_{xx} = \frac{1}{N-m}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T 

    The *biased* correlation differs by the front factor only:

    .. math::

        \check{r}_{xx} = \frac{1}{N}T \sum_{n=0}^{N-m-1} x[n+m]x^*[n] T 

    with :math:`0\leq m\leq N-1`.
    
    .. doctest::
    
        >>> from spectrum import *
        >>> x = [1,2,3,4,5]
        >>> res = CORRELATION(x,x, maxlags=0, norm='biased')
        >>> res[0]
        11.0
        
    .. note:: this function should be replaced by :func:`xcorr`.
    
    .. seealso:: :func:`xcorr`
    """
    assert norm in ['unbiased', 'biased', 'coeff', None]
    if y is None:
        y = x

    # N is the max of the two lengths; zero-pad the shorter array
    N = max(len(x), len(y))
    if len(x) < N:
        x = x.copy()
        x.resize(N)
    if len(y) < N:
        y = y.copy()
        y.resize(N)

    # default lag is N-1
    if maxlags is None:
        maxlags = N - 1
    assert maxlags < N, 'lag must be less than len(x)'

    realdata = isrealobj(x) and isrealobj(y)
    # create a correlation array with the same length as the number of lags
    if realdata:
        r = numpy.zeros(maxlags, dtype=float)
    else:
        r = numpy.zeros(maxlags, dtype=complex)

    if norm == 'coeff':
        rmsx = rms_flat(x)
        rmsy = rms_flat(y)
        
    for k in range(0, maxlags + 1):
        nk = N - k - 1

        if realdata:
            sum = 0
            for j in range(0, nk + 1):
                sum = sum + x[j + k] * y[j]
        else:
            sum = 0. + 0j
            for j in range(0, nk + 1):
                sum = sum + x[j + k] * y[j].conjugate()
        if k == 0:
            if norm in ['biased', 'unbiased']:
                r0 = sum / float(N)
            elif norm is None:
                r0 = sum
            else:
                r0 = 1.
        else:
            if norm == 'unbiased':
                r[k - 1] = sum / float(N - k)
            elif norm == 'biased':
                r[k - 1] = sum / float(N)
            elif norm is None:
                r[k - 1] = sum
            elif norm == 'coeff':
                r[k - 1] = sum / (rmsx * rmsy) / float(N)

    r = numpy.insert(r, 0, r0)
    return r
Example 19
def xcorr_fft(x, y=None, maxlags=None, norm='coeff', doDetrend=False):
    '''
    Cross-correlation using scipy.fftconvolve. Similar returns as TurbulenceTools.xcorr(),
    but faster thanks to fftconvolve.

    Copied from http://subversion.assembla.com/svn/PySpectrum/trunk/src/spectrum/correlation.py
    and further modified for flow analysis.

    Estimates the cross-correlation (and autocorrelation) sequence of a random
    process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N-1.

    Arguments:
        * x: first data array of length N
        * y: second data array of length N. If not specified, computes the
          autocorrelation.
        * maxlags: compute cross correlation between [-maxlags:maxlags]
          when maxlags is not specified, the range of lags is [-N+1:N-1].
        * norm: ['biased', 'unbiased', None, 'coeff'] normalisation
        * doDetrend: [bool] detrend the data first. Useful for measurement data.

    The true cross-correlation sequence is:
        * r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])

    However, in practice, only a finite segment of one realization of the
    infinite-length random process is available.

    The correlation is estimated using scipy.signal.fftconvolve(x, y[::-1], 'full').
    Normalisation is handled by this function using the following cases:

        * 'biased': Biased estimate of the cross-correlation function
        * 'unbiased': Unbiased estimate of the cross-correlation function
        * 'coeff': Normalizes the sequence so the autocorrelation at zero lag is 1.0.

    Returns:
        * xcorr: [np.array, shape=(N-1,1)] the cross-correlation sequence
        * lags: [np.array, shape=(N-1,1)] lag vector

    Notes:
        * If x and y are not the same length, the shorter vector is
          zero-padded to the length of the longer vector.
    '''
    N = len(x)
    if y is None:
        y = x

    if doDetrend:
        x = spsig.detrend(x)
        y = spsig.detrend(y)

    assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'

    if maxlags is None:
        maxlags = N - 1
        lags = np.arange(0, 2 * N - 1)
    else:
        assert maxlags < N, 'maxlags must be less than data length'
        lags = np.arange(N - maxlags - 1, N + maxlags)

    res = spsig.fftconvolve(x, y[::-1], mode="full")

    if norm == 'biased':
        res = res[lags] / float(N)  # do not use /= !!
    elif norm == 'unbiased':
        res = res[lags] / (float(N) - abs(np.arange(-N + 1, N)))[lags]
    elif norm == 'coeff':
        rms = pl.rms_flat(x) * pl.rms_flat(y)
        if rms == 0:
            rms = 1
        res = res[lags] / rms / float(N)
    else:
        res = res[lags]

    res = res[(len(res) - 1) // 2:-1]
    lags = np.arange(0, maxlags)
    return res, lags
Example 20
    def bmodel(self, XY=None, plane=0):
        shp = self.imTool.shape()
        result = []
        blc = [0, 0, int(plane), 0]
        trc = [int(shp[0] - 1), int(shp[1] - 1), int(plane), 0]
        reg = self.rgTool.box(blc=blc, trc=trc)
        dat = self.imTool.getchunk(blc=blc, trc=trc, dropdeg=True)
        self.show(dat)
        a = {'return': {}}
        residual = 'framework.resid.tmp'

        try:
            a = self.imTool.fitcomponents(region=reg, residual=residual)
            if not a['converged']:
                a = self._retryFit(shp, plane, residual)
        except Exception:
            a = self._retryFit(shp, plane, residual)
        if a['converged']:
            origName = self.imTool.name()
            self.imTool.open(residual)
            resid = self.imTool.getchunk(blc=blc, trc=trc, dropdeg=True)
            self.imTool.open(origName)
        else:
            resid = pylab.array([])
        body2 = []
        fit_results = a['results']
        tostr = lambda a: str(a[0]) if type(a) == list else str(a)
        if 'component0' in fit_results:
            shape = fit_results['component0']['shape']
            ra = tostr(self.qaTool.time(shape['direction']['m0']))
            dec = tostr(self.qaTool.angle(shape['direction']['m1']))
            bmaj = tostr(self.qaTool.angle(shape['majoraxis'], form='dig2'))
            bmin = tostr(self.qaTool.angle(shape['minoraxis'], form='dig2'))
            bpa = tostr(self.qaTool.angle(shape['positionangle']))
            flux = ('%4.2f' % fit_results['component0']['flux']['value'][0]
                    + fit_results['component0']['flux']['unit'])
            result.append([ra, dec, bmaj, bmin, bpa, flux])
            ss = ('fit:\testimated center: %s  %s\n' % (ra, dec)
                  + '\tMajAxis : %s  \tMinAxis: %s \tPosAng: %s' % (bmaj, bmin, bpa)
                  + ' flux= ' + flux)
            body2.append('<pre>%s</pre>' % ss)
        else:
            result.append(False)
            body2.append('<pre>Failed to converge in fitting</pre>')
        # write to web page
        if self.write:
            header = 'Image of plane %d of %s' % (plane, self.imageName)
            body1 = ['<pre>The image generated with pylab:</pre>']
            saveDir = self.imDir + self.fname[11:-5] + '-model%d.png' % plane
            pylab.savefig(saveDir)
            self.htmlPub.doBlk(body1, body2, saveDir, header)
        rms = 0.0
        if result[0] is not False:
            self.show(resid)
            rms = pylab.rms_flat(resid)
            min1, max1 = self.min_max(resid)
            print('rms of residual image: %f' % rms)
            # write to web page
            if self.write:
                header = 'Residual from plane %d of image %s' % (plane, self.imageName)
                body1 = ['<pre>The image generated with pylab:</pre>']
                body2 = ['<pre>maximum: %f</pre>' % max1,
                         '<pre>minimum: %f</pre>' % min1,
                         '<pre>rms: %f</pre>' % rms]
                saveDir = self.imDir + self.fname[11:-5] + '-resid%d.png' % plane
                pylab.savefig(saveDir)
                self.htmlPub.doBlk(body1, body2, saveDir, header)
        return result, rms
Example 21
    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

    ## fit model
    mc.MCMC([pi, delta, obs, pred]).sample(iter, burn, thin)


    ## record results
    for i, stoch in enumerate([pi, pred]):
        median = stoch.stats()['quantiles'][50]
        residuals[i].append(pi_true - median)
        lb, ub = stoch.stats()['95% HPD interval']
        coverage[i].append(lb <= pi_true <= ub)

### @export 'summarize-results'
bias = {}
rmse = {}
percent_coverage = {}

for i, s in enumerate(['pi', 'pred']):
    bias[s] = '%.5f' % pl.mean(residuals[i])
    rmse[s] = '%.3f' % pl.rms_flat(residuals[i])
    percent_coverage[s] = '%.2f' % pl.mean(coverage[i])

print('bias', bias)
print('rmse', rmse)
print('percent coverage', percent_coverage)

book_graphics.save_json('neg_binom_sim.json', vars())