Example #1
0
def sampleWithProgress( p0, sampler, nsamples, **kwargs ):
    """Run ``sampler`` for ``nsamples`` iterations, animating a progress bar.

    Only the last (pos, prob, state) triple yielded by the sampler is kept
    and returned.  NOTE(review): if the sampler yields nothing (e.g.
    nsamples == 0) the final return raises NameError -- presumably callers
    always pass nsamples >= 1; confirm.
    """
    bar = ProgressBar(nsamples)

    step = 0
    for result in sampler.sample(p0, iterations=nsamples, **kwargs):
        step += 1
        bar.animate(step)
        pos, prob, state = result

    # Make sure the bar shows completion, then move to a fresh line
    bar.animate(nsamples)
    print("")

    return pos, prob, state
Example #2
0
  def getContours(self, src1, param1, p1min, p1max, p1steps,
                        src2=None, param2=None, p2min=None, p2max=None, p2steps=None,
                        **kwargs):
    """Profile the fit statistic over one or two free parameters.

    Steps parameter ``param1`` of source ``src1`` over ``p1steps`` values in
    [p1min, p1max]; if ``src2``/``param2`` are also given, a 2D grid over
    both parameters is computed.  Runs serially, or — when
    threeML_config['parallel']['use-parallel'] is set — splits the first
    parameter axis into chunks distributed over an IPython cluster.

    Keyword ``debug=True`` enables verbose output from the workers; kwargs
    are also forwarded to the minimizer's ``contours`` and to
    ``ParallelClient``.

    Returns (a, b, cc, fig): the param1 grid, the param2 grid (None for a
    1D profile), the profiled values, and the figure produced by
    self._plotContours / self._plotProfile.

    Raises RuntimeError when no fit was run, the two parameters coincide, a
    range is inverted, or the cluster is unreachable; ValueError when a
    requested parameter is not free in the current model.
    """

    # A fit must have been performed: we profile around its minimum
    if not hasattr(self, 'currentMinimum'):
      raise RuntimeError("You have to run the .fit method before calling getContours.")

    if param1 == param2 and src1 == src2:
      raise RuntimeError("You have to specify two different parameters.")

    if p1min > p1max:
      raise RuntimeError("Minimum larger than maximum for parameter 1")

    if (src2 is not None) and p2min > p2max:
      raise RuntimeError("Minimum larger than maximum for parameter 2")

    # Optional keyword (kwargs itself is forwarded untouched further down)
    debug = bool(kwargs.get("debug", False))

    # Check that the requested parameters exist and are free in the model
    for s, p in zip([src1, src2], [param1, param2]):

      if s is None:
        # Stepping through one parameter only
        continue

      if (s, p) not in self.freeParameters:
        raise ValueError("Parameter %s of source %s is not a free parameter of current model" % (p, s))

    if not threeML_config['parallel']['use-parallel']:

      # Create a new minimizer to avoid messing up with the best fit;
      # keep a backup of the parameters and restore them afterwards
      backup_freeParameters = copy.deepcopy(self.freeParameters)

      minimizer = self.Minimizer(self.minusLogLikeProfile,
                                 self.freeParameters)

      a, b, cc = minimizer.contours(src1, param1, p1min, p1max, p1steps,
                                    src2, param2, p2min, p2max, p2steps,
                                    True, **kwargs)

      # Restore the original parameters
      self.freeParameters = backup_freeParameters

      if src2 is None:
        # 1D profile: flatten the single-column result
        cc = cc[:, 0]

    else:

      # With parallel computation: connect to the engines
      try:
        client = ParallelClient(**kwargs)
      except Exception:
        sys.stderr.write("\nCannot connect to IPython cluster. Is the cluster running ?\n\n\n")
        raise RuntimeError("Cannot connect to IPython cluster.")

      threads = client.getNumberOfEngines()

      if threads > p1steps:
        threads = int(p1steps)
        warnings.warn("The number of threads is larger than the number of steps. Reducing it to %s." % (threads))

      # Make the number of steps divisible by the number of threads,
      # otherwise issue a warning and shrink it
      if float(p1steps) % threads != 0:
        p1steps = p1steps // threads * threads
        warnings.warn("The number of steps is not a multiple of the number of threads. Reducing steps to %s" % (p1steps))

      # Now this is guaranteed to be an integer number
      p1_split_steps = p1steps // int(threads)

      # Prepare arrays for the results
      if src2 is None:
        # One-dimensional profile
        pcc = numpy.zeros(p1steps)
        pa = numpy.linspace(p1min, p1max, p1steps)
        pb = None
      else:
        pcc = numpy.zeros((p1steps, p2steps))
        # The two axes of the parameter space
        pa = numpy.linspace(p1min, p1max, p1steps)
        pb = numpy.linspace(p2min, p2max, p2steps)

      # Parallel worker: computes one slice of the first parameter axis.
      # Only the first axis is split, so that each engine gets a mix of
      # points close to and far from the best fit.
      def worker(i):

        # Re-create the minimizer on the engine
        minimizer = self.Minimizer(self.minusLogLikeProfile,
                                   self.freeParameters)

        this_p1min = pa[i * p1_split_steps]
        this_p1max = pa[(i + 1) * p1_split_steps - 1]

        if debug:
          print("From %s to %s" % (this_p1min, this_p1max))

        aa, bb, ccc = minimizer.contours(src1, param1, this_p1min, this_p1max, p1_split_steps,
                                         src2, param2, p2min, p2max, p2steps,
                                         False, **kwargs)

        return ccc

      lview = client.load_balanced_view()
      amr = lview.map_async(worker, range(threads))

      # Poll the asynchronous map and display progress
      prog = ProgressBar(threads)

      while not amr.ready():

        # Avoid checking too often
        time.sleep(1 + numpy.random.uniform(0, 1))

        if debug:
          # Relay the engines' output (clear_output doesn't do much in
          # terminal environments)
          for stdout, stderr in zip(amr.stdout, amr.stderr):
            if stdout:
              print("%s" % (stdout[-1000:]))
            if stderr:
              print("%s" % (stderr[-1000:]))
          sys.stdout.flush()

        prog.animate(amr.progress - 1)

      # Force the bar to its final state
      # NOTE(review): 'threads - 1' looks like an off-by-one given the
      # "display 100%" intent -- confirm against ProgressBar.animate
      prog.animate(threads - 1)

      print("\n")

      # Collect the per-thread slices into the final array
      res = amr.get()

      for i in range(threads):

        if src2 is None:
          pcc[i * p1_split_steps: (i + 1) * p1_split_steps] = res[i][:, 0]
        else:
          pcc[i * p1_split_steps: (i + 1) * p1_split_steps, :] = res[i]

      cc = pcc
      a = pa
      b = pb

    # Plot the contours (2D) or the profile (1D)
    if src2 is not None:
      fig = self._plotContours("%s of %s" % (param1, src1), a, "%s of %s" % (param2, src2), b, cc)
    else:
      fig = self._plotProfile("%s of %s" % (param1, src1), a, cc)

    # Report if the grid found a minimum better than the fit's
    if self.currentMinimum - cc.min() > 0.1:

      if src2 is not None:
        idx = cc.argmin()
        aidx, bidx = numpy.unravel_index(idx, cc.shape)
        print("\nFound a better minimum: %s with %s = %s and %s = %s"
              % (cc.min(), param1, a[aidx], param2, b[bidx]))
      else:
        idx = cc.argmin()
        print("Found a better minimum: %s with %s = %s"
              % (cc.min(), param1, a[idx]))

    return a, b, cc, fig