Example #1
	def plot_greens(self, shear_normal='shear', greens_ary=None, fnum=0, do_sort=True, y_scale='log', x_scale='linear'):
		'''
		# default params: (self, shear_normal='shear', greens_ary=None, fnum=0, do_sort=True, y_scale='log', x_scale='linear')
		#
		# note that this can memory-explode for large greens files. it might be better to use plot_greens_hist() with the (cumulative=True) parameter
		# and bins= or n_bins={something reasonable}.
		'''
		#
		if greens_ary is None:
			shear_normal = shear_normal_aliases(shear_normal)
			if shear_normal=='greens_shear': g_data = self.get_shear()
			if shear_normal=='greens_normal': g_data = self.get_normal()
		else:
			# use the array passed in rather than re-reading the greens file
			g_data = greens_ary
		#
		sh_0 = g_data.shape
		g_data.shape = (1, g_data.size)
		#
		plt.figure(fnum)
		plt.ion()
		plt.clf()
		ax = plt.gca()
		ax.set_xscale(x_scale)
		ax.set_yscale(y_scale)

		if not do_sort:
			ax.plot(xrange(g_data.size), g_data[0], '.')
			#plt.vlines(xrange(g_data.size), g_data[0], numpy.zeros(len(g_data[0])))
		#
		else:
			# and a distribution:
			# (but note, as stated above, for large arrays, this can be a problem; consider using plot_greens_hist() ).
			#	
			#print "lens: ", len(X), " ", len(Y)
			ax.plot([x+1 for x in xrange(len(g_data[0]))], sorted(g_data[0]), '.-')
		#
		del(g_data)
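# Hedged sketch of the histogram-based alternative the docstring above recommends for
# very large Green's-function arrays: bin the values once instead of plotting every
# point. The function name and parameters here are illustrative, not the real
# plot_greens_hist() implementation.
import numpy
import matplotlib.pyplot as plt

def plot_greens_hist_sketch(g_data, n_bins=200, fnum=1, y_scale='log'):
    # memory stays bounded by n_bins rather than by g_data.size
    counts, edges = numpy.histogram(numpy.ravel(g_data), bins=n_bins)
    centers = 0.5 * (edges[:-1] + edges[1:])
    plt.figure(fnum)
    plt.clf()
    ax = plt.gca()
    ax.set_yscale(y_scale)
    ax.step(centers, numpy.cumsum(counts), where='mid')   # cumulative=True analogue
    ax.set_xlabel('greens value')
    ax.set_ylabel('cumulative count')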
def main():
    pylab.ion();
    ind = [0,];
    ldft = [0,];
    lfft = [0,];
    lpfft = [0,]

    # plot a graph comparing DFT vs FFT timings; the lists only cover sizes up to 2**8
    for i in range(1, 9, 1):
        t_before = time.clock();
        dsprocessing.dspDft(rand(2**i).tolist());
        dt = time.clock() - t_before;
        ldft.append(dt);
        print ("dft ", 2**i, dt);
        #pylab.plot([2**i,], [time.clock()-t_before,]);
        t_before = time.clock();
        dsprocessing.dspFft(rand(2**i).tolist());
        dt = time.clock() - t_before;
        print ("fft ", 2**i, dt);
        lfft.append(dt);
        #pylab.plot([2**i,], [time.clock()-t_before,]);
        ind.append(2**i);
        # python fft just to compare
        t_before = time.clock();
        pylab.fft(rand(2**i).tolist());
        dt = time.clock() - t_before;
        lpfft.append(dt);

    pylab.plot(ind, ldft);
    pylab.plot(ind, lfft);
    pylab.plot(ind, lpfft);
    pylab.show();
    return [ind, ldft, lfft, lpfft];
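# Hedged aside: time.clock() used in main() was removed in Python 3.8. A self-contained
# variant of the same benchmark using time.perf_counter(); the naive O(N^2) DFT below
# stands in for the external dsprocessing.dspDft routine, and numpy.fft.fft for pylab.fft.
import time
import numpy as np

def naive_dft(x):
    # direct O(N^2) evaluation of the DFT sum, for timing comparison only
    n = len(x)
    return [sum(x[k] * np.exp(-2j * np.pi * j * k / n) for k in range(n)) for j in range(n)]

def benchmark(max_power=8):
    sizes, t_dft, t_fft = [], [], []
    for i in range(1, max_power + 1):
        data = np.random.rand(2 ** i)
        t0 = time.perf_counter(); naive_dft(data); t_dft.append(time.perf_counter() - t0)
        t0 = time.perf_counter(); np.fft.fft(data); t_fft.append(time.perf_counter() - t0)
        sizes.append(2 ** i)
    return sizes, t_dft, t_fft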
    def InitializePlot(self, goal_config):
        self.fig = pl.figure()
        lower_limits, upper_limits = self.boundary_limits
        pl.xlim([lower_limits[0], upper_limits[0]])
        pl.ylim([lower_limits[1], upper_limits[1]])
        pl.plot(goal_config[0], goal_config[1], 'gx')

        # Show all obstacles in environment
        for b in self.robot.GetEnv().GetBodies():
            if b.GetName() == self.robot.GetName():
                continue
            bb = b.ComputeAABB()
            pl.plot([bb.pos()[0] - bb.extents()[0],
                     bb.pos()[0] + bb.extents()[0],
                     bb.pos()[0] + bb.extents()[0],
                     bb.pos()[0] - bb.extents()[0],
                     bb.pos()[0] - bb.extents()[0]],
                    [bb.pos()[1] - bb.extents()[1],
                     bb.pos()[1] - bb.extents()[1],
                     bb.pos()[1] + bb.extents()[1],
                     bb.pos()[1] + bb.extents()[1],
                     bb.pos()[1] - bb.extents()[1]], 'r')
                    
                     
        pl.ion()
        pl.show()
def hinton(W, maxWeight=None):
    """
    Draws a Hinton diagram for visualizing a weight matrix. 
    Temporarily disables matplotlib interactive mode if it is on, 
    otherwise this takes forever.
    """
    reenable = False
    if P.isinteractive():
        P.ioff()
        reenable = True
    P.clf()
    height, width = W.shape
    if not maxWeight:
        maxWeight = 2**N.ceil(N.log(N.max(N.abs(W)))/N.log(2))

    P.fill(N.array([0,width,width,0]),N.array([0,0,height,height]),'gray')
    P.axis('off')
    P.axis('equal')
    for x in xrange(width):
        for y in xrange(height):
            _x = x+1
            _y = y+1
            w = W[y,x]
            if w > 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,w/maxWeight),'white')
            elif w < 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,-w/maxWeight),'black')
    if reenable:
        P.ion()
    P.show()
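# The hinton() recipes in this collection call a _blob() helper that the excerpts do not
# show. A sketch of that helper, modelled on the SciPy Cookbook recipe cited by the later
# copy; it fills one square of the given area at (x, y). N and P mirror the aliases the
# snippet above assumes.
import numpy as N
import pylab as P

def _blob(x, y, area, colour):
    hs = N.sqrt(area) / 2.0
    xcorners = N.array([x - hs, x + hs, x + hs, x - hs])
    ycorners = N.array([y - hs, y - hs, y + hs, y + hs])
    P.fill(xcorners, ycorners, colour, edgecolor=colour)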
Example #5
def cmap_plot(cmdLine):

    pylab.figure(figsize=[5,10])
    a=outer(ones(10),arange(0,1,0.01))
    subplots_adjust(top=0.99,bottom=0.00,left=0.01,right=0.8)
    maps=[m for m in cm.datad if not m.endswith("_r")]
    maps.sort()
    l=len(maps)+1
    for i, m in enumerate(maps):
        print m
        subplot(l,1,i+1)
        pylab.setp(pylab.gca(),xticklabels=[],xticks=[],yticklabels=[],yticks=[])
        imshow(a,aspect='auto',cmap=get_cmap(m),origin="lower")
        pylab.text(100.85,0.5,m,fontsize=10)

# render plot

    if cmdLine: 
        pylab.show(block=True)
    else: 
        pylab.ion()
        pylab.plot([])
        pylab.ioff()
	
    status = 1
    return status
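# Hedged aside: cm.datad enumerates colormaps through a long-deprecated lookup. A minimal
# modern sketch of the same colormap strip using plt.colormaps(); figure size and font
# size are arbitrary choices here.
import numpy as np
import matplotlib.pyplot as plt

def cmap_strip(names=None):
    names = sorted(names or [m for m in plt.colormaps() if not m.endswith("_r")])
    gradient = np.outer(np.ones(10), np.linspace(0.0, 1.0, 100))
    fig, axes = plt.subplots(len(names), 1, figsize=(5, 10), squeeze=False)
    for ax, name in zip(axes[:, 0], names):
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name), origin='lower')
        ax.set_axis_off()
        ax.text(101, 3, name, fontsize=6)
    return fig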
Example #6
def show_results(path, S, costs, animated=0, target_speed=.1, save_output=None):
  P.ion()
  fig1 = P.figure(0); fig1.clear()
  ax = fig1.add_subplot(1,1,1)
  ax.axis('scaled')
  ax.axis([-1,1,-1,1])

  if path:
    draw_path(ax, path)

  if animated:
    animate_car(ax, S, remove_car=True, sleep=animated, save_output=save_output)
  else:
    animate_car(ax, S, remove_car=False, sleep=0, save_output=save_output,
                alphas=linspace(0.1,.5,len(S))**2)

  if costs is not None:
    # show summary statistics
    fig2 = P.figure(1); fig2.clear()
    ax2 = fig2.add_subplot(2,1,1)
    ax3 = fig2.add_subplot(2,1,2)

    ax2.plot(costs, label='state cost');
    ax2.set_ylabel('Controller score')
    ax3.plot([speed for _,_,_,speed,_ in S], label='actual')
    ax3.plot([0, len(S)], [target_speed, target_speed], 'k--',
                       label='target')
    ax3.legend(loc='best')
    ax3.set_ylabel('speed')
Example #7
def test_path():
  "generate and draw a random path"
  path = genpath()
  P.ion()
  P.clf()
  draw_path(P.gca(), path)
  P.draw()
Example #8
def makeimg(wav):
	global callpath
	global imgpath

	fs, frames = wavfile.read(os.path.join(callpath, wav))
	
	pylab.ion()

	# generate specgram
	pylab.figure(1)
	
	pylab.specgram(
		frames,
		NFFT=256, 
		Fs=22050, 
		detrend=pylab.detrend_none,
		window=numpy.hamming(256),
		noverlap=192,
		cmap=pylab.get_cmap('Greys'))
	
	x_width = len(frames)/fs
	
	pylab.ylim([0,11025])
	pylab.xlim([0,round(x_width,3)-0.006])
	
	img_path = os.path.join(imgpath, wav.replace(".wav",".png"))

	pylab.savefig(img_path)
	
	return img_path
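# Hedged aside: makeimg() reads the true sample rate into fs but then hard-codes
# Fs=22050 and an 11025 Hz y-limit. A minimal sketch that keys both to the file's own
# rate; wav_path and png_path are placeholders.
import numpy
import pylab
from scipy.io import wavfile

def make_specgram(wav_path, png_path):
    fs, frames = wavfile.read(wav_path)
    pylab.figure()
    pylab.specgram(frames, NFFT=256, Fs=fs, window=numpy.hamming(256),
                   noverlap=192, cmap=pylab.get_cmap('Greys'))
    pylab.ylim([0, fs / 2.0])   # Nyquist frequency instead of a fixed 11025 Hz
    pylab.savefig(png_path)
    return png_path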
	def __init__(self, window_size, easting_offset, northing_offset):
		self.rad_to_deg = 180.0/pi
        # Get parameters
		self.plot_pose = plot_pose
		self.plot_gnss = plot_gnss
		self.plot_odometry = plot_odometry
		self.plot_yaw = plot_yaw
		self.trkpt_threshold = 0.1 # [m]
        
		# Initialize map
		self.offset_e = easting_offset
		self.offset_n = northing_offset
		self.window_size = window_size
		self.map_title = map_title
		self.odo = []
		self.gnss = []
		self.pose_pos = []
		self.odo_yaw = []
		self.gnss_yaw = []
		self.ahrs_yaw = []
		self.pose_yaw = []
		self.wpt_mode = 0
		self.wpt_destination = False
		self.wpt_target = False

		self.pose_image_save = True # save an image for time-lapse video generation
		self.pose_image_count = 0

		ion() # turn interaction mode on
    def InitializePlot(self, goal_config):  # default
        self.fig = pl.figure()
        pl.xlim([self.lower_limits[0], self.upper_limits[0]])
        pl.ylim([self.lower_limits[1], self.upper_limits[1]])
        pl.plot(goal_config[0], goal_config[1], "gx")

        # Show all obstacles in environment
        for b in self.robot.GetEnv().GetBodies():
            if b.GetName() == self.robot.GetName():
                continue
            bb = b.ComputeAABB()
            pl.plot(
                [
                    bb.pos()[0] - bb.extents()[0],
                    bb.pos()[0] + bb.extents()[0],
                    bb.pos()[0] + bb.extents()[0],
                    bb.pos()[0] - bb.extents()[0],
                    bb.pos()[0] - bb.extents()[0],
                ],
                [
                    bb.pos()[1] - bb.extents()[1],
                    bb.pos()[1] - bb.extents()[1],
                    bb.pos()[1] + bb.extents()[1],
                    bb.pos()[1] + bb.extents()[1],
                    bb.pos()[1] - bb.extents()[1],
                ],
                "r",
            )

        pl.ion()
        pl.show()
Example #11
    def __init__(self, ca, cmap=None):
        """
        CAPlotter() constructor keeps a reference to the CA model, and
        optionally a colormap to be used with plots.

        Parameters
        ----------
        ca : LandlabCellularAutomaton object
            Reference to a CA model
        cmap : Matplotlib colormap (optional)
            Colormap to be used in plotting
        """
        import matplotlib

        # Set the colormap; default to matplotlib's "jet" colormap
        if cmap is None:
            self._cmap = matplotlib.cm.jet
        else:
            self._cmap = cmap

        # Keep a reference to the CA model
        self.ca = ca

        # Initialize the plot and remember the grid type
        plt.ion()
        plt.figure(1)
        if type(ca.grid) is landlab.grid.hex.HexModelGrid:
            self.gridtype = 'hex'
        else:
            self.gridtype = 'rast'
Example #12
def qqplotfromq(qnull,qemp):
    '''
    Given uniform quantile values and empirical ones, make a QQ plot
    '''    
    pl.ion()
    pl.plot(qnull, qemp, '.',markersize = 2)                
    addqqplotinfo(qnull,qnull.flatten().shape[0])
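# Hedged sketch of a self-contained QQ plot against the uniform null, without the
# addqqplotinfo() helper used above; pv is a 1-D array of p-values.
import numpy as np
import pylab as pl

def qqplot_uniform(pv):
    pv = np.sort(np.ravel(pv))
    qnull = (np.arange(1, len(pv) + 1) - 0.5) / len(pv)   # expected uniform quantiles
    pl.plot(-np.log10(qnull), -np.log10(pv), '.', markersize=2)
    lim = -np.log10(qnull[0])
    pl.plot([0, lim], [0, lim], 'k--')   # y = x reference line
    pl.xlabel('expected -log10(p)')
    pl.ylabel('observed -log10(p)')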
Example #13
 def drawModel(self):
     PL.ion() # bug fix by Alex Hill in 2013
     if self.modelFigure is None or self.modelFigure.canvas.manager.window is None:
         self.modelFigure = PL.figure()
     self.modelDrawFunc()
     self.modelFigure.canvas.manager.window.update()
     PL.show() # bug fix by Hiroki Sayama in 2016
Example #14
def bistability_analysis():
    f2_range = linspace(0, 0.4, 41)
    t = linspace(0, 50000, 1000)
    ion()
    ss_aBax_vals_up = []
    ss_aBax_vals_down = []

    for f2 in f2_range:
        model.parameters['Bid_0'].value = f2 * 1e-1
        bax_total = 2e-1

        # Do "up" portion of hysteresis plot
        model.parameters['aBax_0'].value = 0
        model.parameters['cBax_0'].value = bax_total
        x = odesolve(model, t)
        figure('up')
        plot(t, x['aBax_']/bax_total)
        ss_aBax_vals_up.append(x['aBax_'][-1]/bax_total)

        # Do "down" portion of hysteresis plot
        model.parameters['aBax_0'].value = bax_total
        model.parameters['cBax_0'].value = 0
        x = odesolve(model, t)
        figure('down')
        plot(t, x['aBax_']/bax_total)
        ss_aBax_vals_down.append(x['aBax_'][-1]/bax_total)

    figure()
    plot(f2_range, ss_aBax_vals_up, 'r')
    plot(f2_range, ss_aBax_vals_down, 'g')
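# Hedged illustration of the same up/down hysteresis protocol on a toy bistable ODE,
# dx/dt = -x**3 + x + f, with scipy.integrate.odeint standing in for the PySB
# odesolve() call used above; in the bistable window the two sweeps settle onto
# different branches.
import numpy as np
from scipy.integrate import odeint

def hysteresis_scan(f_values, x0, t=np.linspace(0, 50, 500)):
    ss, x = [], x0
    for f in f_values:
        traj = odeint(lambda y, t, f=f: -y**3 + y + f, x, t)
        x = traj[-1, 0]   # seed the next parameter value with the last steady state
        ss.append(x)
    return np.array(ss)

f_range = np.linspace(-1.0, 1.0, 41)
ss_up = hysteresis_scan(f_range, x0=-1.0)
ss_down = hysteresis_scan(f_range[::-1], x0=1.0)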
Example #15
def qqplotp(pv,fileout = None, pnames=pnames(), rownames=rownames(),alphalevel = 0.05,legend=None,xlim=None,ylim=None,ycoord=10,plotsize="652x526",title=None,dohist=True):
     '''
     Read in p-values from filein and make a QQ plot and histogram.
     If fileout is provided, only the QQ plot is saved at present.
     Searches through p until one is found.   '''       
     
     import pylab as pl     
     pl.ion()     
          
     fs=8     
     h1=qqplot(pv, fileout, alphalevel,legend,xlim,ylim,addlambda=True)
     #lambda_gc=estimate_lambda(pv)
     #pl.legend(["gc="+ '%1.3f' % lambda_gc],loc=2)     
     pl.title(title,fontsize=fs)
     #wm=pl.get_current_fig_manager()
     #e.g. "652x526+100+10
     xcoord=100
     #wm.window.wm_geometry(plotsize + "+" + str(xcoord) + "+" + str(ycoord))

     if dohist:
         h2=pvalhist(pv)
         pl.title(title,fontsize=fs)
         #wm=pl.get_current_fig_manager()
         width_height=plotsize.split("x")
         buffer=10
         xcoord=int(xcoord + float(width_height[0])+buffer)
         #wm.window.wm_geometry(plotsize + "+" + str(xcoord) + "+" + str(ycoord))
     else: h2=None

     return h1,h2
Example #16
def hinton(W, maxWeight=None):
    """
    Source: http://wiki.scipy.org/Cookbook/Matplotlib/HintonDiagrams
    Draws a Hinton diagram for visualizing a weight matrix.
    Temporarily disables matplotlib interactive mode if it is on,
    otherwise this takes forever.
    """
    reenable = False
    if pl.isinteractive():
        pl.ioff()
        reenable = True
    pl.clf()
    height, width = W.shape
    if not maxWeight:
        maxWeight = 2**np.ceil(np.log(np.max(np.abs(W)))/np.log(2))

    pl.fill(np.array([0,width,width,0]),np.array([0,0,height,height]),'gray')
    pl.axis('off')
    pl.axis('equal')
    for x in xrange(width):
        for y in xrange(height):
            _x = x+1
            _y = y+1
            w = W[y,x]
            if w > 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,w/maxWeight),'white')
            elif w < 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,-w/maxWeight),'black')
    if reenable:
        pl.ion()
    pl.show()
Example #17
def plotDirections(aabb=(),mask=0,bins=20,numHist=True,noShow=False,sphSph=False):
	"""Plot 3 histograms for distribution of interaction directions, in yz,xz and xy planes and
	(optional but default) histogram of number of interactions per body. If sphSph only sphere-sphere interactions are considered for the 3 directions histograms.

	:returns: If *noShow* is ``False``, displays the figure and returns nothing. If *noShow*, the figure object is returned without being displayed (works the same way as :yref:`yade.plot.plot`).
	"""
	import pylab,math
	from yade import utils
	for axis in [0,1,2]:
		d=utils.interactionAnglesHistogram(axis,mask=mask,bins=bins,aabb=aabb,sphSph=sphSph)
		fc=[0,0,0]; fc[axis]=1.
		subp=pylab.subplot(220+axis+1,polar=True);
		# 1.1 makes small gaps between values (but the column is a bit decentered)
		pylab.bar(d[0],d[1],width=math.pi/(1.1*bins),fc=fc,alpha=.7,label=['yz','xz','xy'][axis])
		#pylab.title(['yz','xz','xy'][axis]+' plane')
		pylab.text(.5,.25,['yz','xz','xy'][axis],horizontalalignment='center',verticalalignment='center',transform=subp.transAxes,fontsize='xx-large')
	if numHist:
		pylab.subplot(224,polar=False)
		nums,counts=utils.bodyNumInteractionsHistogram(aabb if len(aabb)>0 else utils.aabbExtrema())
		avg=sum([nums[i]*counts[i] for i in range(len(nums))])/(1.*sum(counts))
		pylab.bar(nums,counts,fc=[1,1,0],alpha=.7,align='center')
		pylab.xlabel('Interactions per body (avg. %g)'%avg)
		pylab.axvline(x=avg,linewidth=3,color='r')
		pylab.ylabel('Body count')
	if noShow: return pylab.gcf()
	else:
		pylab.ion()
		pylab.show()
def plotSpectrum(spectrum,filename=""):
		pylab.ion() 	
		pylab.figure(0) 	
		pylab.clf() 	
		pylab.plot(spectrum)
		pylab.draw()
		pylab.savefig(filename)
def plotSpectrum6(spectrum1On,spectrum2On,spectrum3On,spectrum4On,spectrum1Off,spectrum2Off,spectrum3Off,spectrum4Off,thetaL,thetaR,filename=""):
		pylab.ion() 	
		pylab.figure(0) 	
		pylab.clf() 
		pylab.grid()
		pylab.subplot(321)
		pylab.plot(spectrum1On,'r')
		pylab.plot(spectrum1Off)
		pylab.title('Channel1')
		pylab.subplot(322)
		pylab.title('Channel2')
		pylab.plot(spectrum2On,'r')
		pylab.plot(spectrum2Off)
		pylab.subplot(323)
		pylab.title('Channel3')
		pylab.plot(spectrum3On,'r')
		pylab.plot(spectrum3Off)
		pylab.subplot(324)
		pylab.title('Channel4')
		pylab.plot(spectrum4On,'r')
		pylab.plot(spectrum4Off)
		pylab.subplot(325)
		pylab.title('ThetaL')
		pylab.plot(thetaL,'r')
		pylab.subplot(326)
		pylab.title('ThetaR')
		pylab.plot(thetaR,'r')
		pylab.draw()
		pylab.savefig(filename)
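# Hedged aside: the six near-identical subplot stanzas in plotSpectrum6() can be driven
# by a loop; spectra_on and spectra_off are lists of the four channel arrays.
import pylab

def plotSpectrum6_loop(spectra_on, spectra_off, thetaL, thetaR, filename=""):
    pylab.ion()
    pylab.figure(0)
    pylab.clf()
    for i, (s_on, s_off) in enumerate(zip(spectra_on, spectra_off)):
        pylab.subplot(3, 2, i + 1)
        pylab.title('Channel%d' % (i + 1))
        pylab.plot(s_on, 'r')
        pylab.plot(s_off)
    for i, (name, theta) in enumerate([('ThetaL', thetaL), ('ThetaR', thetaR)]):
        pylab.subplot(3, 2, 5 + i)
        pylab.title(name)
        pylab.plot(theta, 'r')
    pylab.draw()
    if filename:
        pylab.savefig(filename)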
Example #20
 def save_plot(self, filename):
     plt.ion()
     targarr = np.array(self.targvalue)
     self.posi[0].set_xdata(self.wt_positions[:,0])
     self.posi[0].set_ydata(self.wt_positions[:,1])
     while len(self.plotel)>0:
         self.plotel.pop(0).remove()
     self.plotel = self.shape_plot.plot(np.array([self.wt_positions[[i,j],0] for i, j in self.elnet_layout.keys()]).T,
                                np.array([self.wt_positions[[i,j],1]  for i, j in self.elnet_layout.keys()]).T, 'y-', linewidth=1)
     for i in range(len(self.posb)):
         self.posb[i][0].set_xdata(self.iterations)
         self.posb[i][0].set_ydata(targarr[:,i])
         self.legend.texts[i].set_text('%s = %8.2f'%(self.targname[i], targarr[-1,i]))
     self.objf_plot.set_xlim([0, self.iterations[-1]])
     self.objf_plot.set_ylim([0.5, 1.2])
     if not self.title == '':
         plt.title('%s = %8.2f'%(self.title, getattr(self, self.title)))
     plt.draw()
     #print self.iterations[-1] , ': ' + ', '.join(['%s=%6.2f'%(self.targname[i], targarr[-1,i]) for i in range(len(self.targname))])
     with open(self.result_file+'.results','a') as f:
         f.write( '%d:'%(self.inc) + ', '.join(['%s=%6.2f'%(self.targname[i], targarr[-1,i]) for i in range(len(self.targname))]) +
             '\n')
     #plt.show()
     #plt.savefig(filename)
     display(plt.gcf())
     #plt.show()
     clear_output(wait=True)
    def image_loop(self, decay):
        import pylab
        fig = pylab.figure()
        pylab.ion()
        img = pylab.imshow(self.image, vmax=1, vmin=-1,
                                       interpolation='none', cmap='binary')

        if self.track_periods is not None:
            colors = ([(0,0,1), (0,1,0), (1,0,0), (1,1,0), (1,0,1)] * 10)[:len(self.p_y)]
            scatter = pylab.scatter(self.p_y, self.p_x, s=50, c=colors)
        else:
            scatter = None

        while True:
            #fig.clear()
            #print self.track_periods
            #pylab.plot(self.delta)
            #pylab.hist(self.delta, 50, range=(0000, 15000))
            img.set_data(self.image)
            if scatter is not None:
                scatter.set_offsets(np.array([self.p_y, self.p_x]).T)
                c = [(r,g,b,min(self.track_certainty[i],1)) for i,(r,g,b) in enumerate(colors)]
                scatter.set_color(c)
            if display_mode == 'quick':
                # this is much faster, but doesn't work on all systems
                fig.canvas.draw()
                fig.canvas.flush_events()
            else:
                # this works on all systems, but is kinda slow
                pylab.pause(0.001)
            self.image *= decay
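# Hedged helper distilling the two redraw strategies discussed in image_loop():
# canvas.draw() plus flush_events() is fast but backend-dependent, while pylab.pause()
# works everywhere at some cost in speed.
import pylab

def refresh(fig, quick=True, pause_s=0.001):
    if quick:
        fig.canvas.draw()
        fig.canvas.flush_events()
    else:
        pylab.pause(pause_s)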
def run():
    colors = [
        'b', 'g', 'r', 'c', 'm', 'y', 'k',
        'b--', 'g--', 'r--', 'c--', 'm--', 'y--', 'k--',
        'bo', 'go', 'ro', 'co', 'mo', 'yo', 'ko',
        'b+', 'g+', 'r+', 'c+', 'm+', 'y+', 'k+',
        'b*', 'g*', 'r*', 'c*', 'm*', 'y*', 'k*',
        'b|', 'g|', 'r|', 'c|', 'm|', 'y|', 'k|',
    ]
    plots = defaultdict(list)
    heap_size = []
    order = ['Heap change']
    manager = pylab.get_current_fig_manager()
    manager.resize(1400, 1350)
    pylab.ion()

    for entry in read_data():
        heap_size.append(entry["after"]["size_bytes"])

        pylab.subplot(2, 1, 1)
        pylab.plot(heap_size, 'r', label='Heap size')
        pylab.legend(["Heap size"], loc=2)

        pylab.subplot(2, 1, 2)
        plots["Heap change"].append(entry["change"]["size_bytes"])
        for thing in entry["change"]["details"]:
            if thing["what"] not in order:
                order.append(thing["what"])
            plots[thing["what"]].append(thing["size_bytes"])

        for what, color in zip(order, colors):
            pylab.plot(plots[what], color, label=what)
        pylab.legend(order, loc=3)
        pylab.draw()
     def run(self):
         plts = {}
         graphs = {}
         pos  = 0

         plt.ion()
         plt.style.use('ggplot')

         for name in sorted(self.names.values()):
             p = plt.subplot(math.ceil(len(self.names) / 2), 2, pos+1)
             p.set_ylim([0, 100])
             p.set_title(self.machine_classes[name] + " " + name)

             p.get_xaxis().set_visible(False)

             X = range(0, NUM_ENTRIES, 1)
             Y = NUM_ENTRIES * [0]
             graphs[name] = p.plot(X, Y)[0]

             plts[name] = p
             pos += 1 

         plt.tight_layout()
         
         while True:
             for name, p in plts.items():
                 graphs[name].set_ydata(self.loads[name])

             plt.draw()
             plt.pause(0.05)
Example #24
 def sintest(self, AMP=0.5, FREQ=1, TIME=5, mode='velocity'):
 
     # freq in hertz
     # amp in radians
     
     self.tstart = time.time()
     t = time.time()-self.tstart
     self.setpos(0)
     pylab.ion()
     
     print('**Running Sin Test**')
     
     while t<TIME:
         t = time.time()-self.tstart
         ctrl = AMP*np.sin(FREQ*2.0*np.pi*t)
         
         if mode == 'velocity':
             self.setvel(ctrl)
             output = self.getvel()
         if mode == 'pos':
             self.setpos(ctrl)
             output = self.getpos()
         
         pylab.plot([t],[output],color='blue', marker='*')
         pylab.plot([t],[ctrl],color='red')
         pylab.draw()
     self.stop()
Example #25
 def __init__(self, net, task, valueNetwork=None, **args):
     self.net = net
     self.task = task
     self.setArgs(**args)
      if self.valueLearningRate is None:
          self.valueLearningRate = self.learningRate
      if self.valueMomentum is None:
          self.valueMomentum = self.momentum
     if self.supervisedPlotting:
         from pylab import ion
         ion() 
     
     # adaptive temperature:
     self.tau = 1.
     
     # prepare the datasets to be used
     self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
     self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
     self.valueDs = SequentialDataSet(self.task.outdim, 1)
     
     # prepare the supervised trainers
     self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
                               self.momentum, verbose=False,
                               batchlearning=True)            
     
     # CHECKME: outsource
     self.vnet = valueNetwork
      if valueNetwork is not None:
         self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
                                    self.valueMomentum, verbose=self.verbose)
         
     # keep information:
     self.totalSteps = 0
     self.totalEpisodes = 0
def plotInit(Plotting, Elements):
	if (Plotting == 2):
		loc = [i.xy for i in Elements]
		x = [i.real for i in loc]
		y = [i.imag for i in loc]
		x = list(sorted(set(x))) 
		x.remove(-10)
		y = list(sorted(set(y)))

		X, Y = pylab.meshgrid(x, y)
		U = pylab.ones(shape(X))
		V = pylab.ones(shape(Y))

		pylab.ion()
		fig, ax = pylab.subplots(1,1)
		graph = ax.quiver(X, Y, U, V)
		pylab.draw()
	else:
		pylab.ion()
		graph, = pylab.plot(1, 'ro', markersize = 2) 
		x = 2
		pylab.axis([-x,x,x,-x])

		graph.set_xdata(0)
		graph.set_ydata(0)
		pylab.draw()

	return graph
Example #27
	def histoCase( self ):
		print "Building Histogram"
		
		header = self.histoAxis.get()
		
		if ( self.filterNum.get() == "None" ):
			data = self.dataInstance.getNumericAxis( self.histoAxis.get() )
		else:
			labels = []
			data = []
			for set in self.main.histoCols:
				labels.append( set[0] )
				data.append( set[1] )
		
		pylab.ion()
		if ( self.filterNum.get() == "None" ):
			pylab.hist( data, bins=self.bins.get(), label=header )
		else:
			pylab.hist( data, bins=self.bins.get(), label=labels )

		pylab.legend()
		pylab.xlabel( header )
		pylab.ylabel("Frequency")
		pylab.title("Histogram" )

		pylab.show()
def animate_1D(time, var, x, ylab=' '): 
    """Animate a 2d array with a sequence of 1d plots

     Input: time = one-dimensional time vector;
            var =  array with first dimension = len(time) ;
            x = (optional) vector width dimension equal to var.shape[1];
            ylab = ylabel for plot
    """
    
    import pylab
    import numpy
   
    

    pylab.close()
    pylab.ion()

    # Initial plot
    vmin=var.min()
    vmax=var.max()
    line, = pylab.plot( (x.min(), x.max()), (vmin, vmax), 'o')

    # Lots of plots
    for i in range(len(time)):
        line.set_xdata(x)
        line.set_ydata(var[i,:])
        pylab.draw()
        pylab.xlabel('x')
        pylab.ylabel(ylab)
        pylab.title('time = ' + str(time[i]))
    
    return
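# Hedged variant: on current Matplotlib backends a short pause is usually needed for a
# frame to actually appear between set_ydata() calls; a minimal runnable loop:
import numpy as np
import pylab

def animate_1D_paused(time, var, x, ylab=' ', pause_s=0.01):
    pylab.ion()
    line, = pylab.plot(x, var[0, :])
    pylab.ylim(var.min(), var.max())
    pylab.xlabel('x')
    pylab.ylabel(ylab)
    for i in range(len(time)):
        line.set_ydata(var[i, :])
        pylab.title('time = ' + str(time[i]))
        pylab.pause(pause_s)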
Example #29
 def __init__(self,file):
     # Task parameters
     self.running = True
     
     # Class variables
     self.origin = Vector(0,0)
     self.position = Vector(0,0)
     self.position_list = []
     
     # init plot
     self.fig = pyplot.figure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')
     self.area = 2
     ion()
     
     # Init transform
     self.tf = tf.TransformListener()
     self.br = tf.TransformBroadcaster()
     self.quaternion = np.empty((4, ), dtype=np.float64)
     
     # Init node
     self.rate = rospy.Rate(10)
     
     # Init subscriber
     self.imu_sub = rospy.Subscriber('/fmInformation/imu', Imu, self.onImu )
     
     # Init stat
     self.file = file
     self.deviations = []    
def saveHintonDiagram(W, directory):
    maxWeight = None
    #print "Weight: ", W
    """
    Draws a Hinton diagram for visualizing a weight matrix. 
    Temporarily disables matplotlib interactive mode if it is on, 
    otherwise this takes forever.
    """
    reenable = False
    if pylab.isinteractive():
        pylab.ioff()
        reenable = True
    pylab.clf()
    height, width = W.shape
    if not maxWeight:
        maxWeight = 2**numpy.ceil(numpy.log(numpy.max(numpy.abs(W)))/numpy.log(2))

    pylab.fill(numpy.array([0,width,width,0]),numpy.array([0,0,height,height]),'gray')
    pylab.axis('off')
    pylab.axis('equal')
    for x in xrange(width):
        for y in xrange(height):
            _x = x+1
            _y = y+1
            w = W[y,x]
            if w > 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,w/maxWeight),'white')
            elif w < 0:
                _blob(_x - 0.5, height - _y + 0.5, min(1,-w/maxWeight),'black')
    if reenable:
        pylab.ion()
    #pylab.show()
    pylab.savefig(directory)
Example #31
def evolve(nx, kappa, tau, tmax, dovis=0, returnInit=0):
    """
    The main evolution loop.  Evolve

        phi_t = kappa phi_{xx} + (1/tau) R(phi)

    from t = 0 to tmax.
    """

    # create the grid
    gr = grid(nx, ng=1, xmin=0.0, xmax=100.0,
              vars=["phi", "phi1", "phi2"])

    # pointers to the data at various stages
    phi = gr.data["phi"]
    phi1 = gr.data["phi1"]
    phi2 = gr.data["phi2"]

    # initialize
    gr.initialize(20, 1)

    phiInit = phi.copy()

    # runtime plotting
    if dovis == 1:
        pylab.ion()

    t = 0.0
    while (t < tmax):

        dt = estDt(gr, kappa, tau)

        if (t + dt > tmax):
            dt = tmax - t

        # react for dt/2
        phi1[:] = react(gr, phi, tau, dt / 2)
        gr.fillBC("phi1")

        # diffuse for dt
        phi2[:] = diffuse(gr, phi1, kappa, dt)
        gr.fillBC("phi2")

        # react for dt/2 -- this is the updated solution
        phi[:] = react(gr, phi2, tau, dt / 2)
        gr.fillBC("phi")

        t += dt

        if dovis == 1:
            pylab.clf()
            pylab.plot(gr.x, phi)
            pylab.xlim(gr.xmin, gr.xmax)
            pylab.ylim(0.0, 1.0)
            pylab.draw()

    print(t)

    if returnInit == 1:
        return phi, gr.x, phiInit
    else:
        return phi, gr.x
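# Hedged aside: the dt/2 react, dt diffuse, dt/2 react sequence inside evolve() is Strang
# operator splitting, which is second-order accurate in dt. A generic sketch with
# pluggable operators (react and diffuse are callables like the ones used above):
def strang_step(phi, dt, react, diffuse):
    phi = react(phi, dt / 2.0)    # half reaction step
    phi = diffuse(phi, dt)        # full diffusion step
    return react(phi, dt / 2.0)   # second half reaction step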
Example #32
import numpy as np
import pylab as py
import pickle as pk
import covariance_utils as cu
import fisher.forecast.fisher_util as ut

py.ion()

theory_file = 'camb_spectra2/params_LCDM_planckbestfit_r_0.01_lensedtotCls.dat'
tell, tTT, tEE, tBB, tTE = ut.read_spectra(theory_file, raw=False)

nsims = 10
sim_runs = 1
#simlength = 100
simlength = len(tell)
delta_bin = 50
fft_scale = 16
map_scale = 4.
window_scale = 32.
noise_scale = 0.00001  #ratio of signal_scale
signal_scale = 1.0
order = 3
apply_window = True
noise_off = False
#################################################################################

x = np.arange(simlength + 1, dtype=float) / (simlength) * 10. * np.pi

#Truncate input spectra to be in multiples of delta_bin
extra = np.mod(len(tTT), delta_bin)
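# Hedged continuation: the excerpt stops here, but the comment above implies a cut like
# the following, trimming every theory spectrum to a multiple of delta_bin.
if extra > 0:
    tell, tTT, tEE, tBB, tTE = (a[:-extra] for a in (tell, tTT, tEE, tBB, tTE))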
Example #33
try:
    get_ipython().magic('load_ext autoreload')
    get_ipython().magic('autoreload 2')
except NameError:
    print('Not IPYTHON')
    pass

import sys
import numpy as np
import psutil
import glob
import os
import scipy
from ipyparallel import Client
# mpl.use('Qt5Agg')
import pylab as pl
pl.ion()
#%%
import caiman as cm
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob

#%%
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
#%%
is_patches = True
is_dendrites = False  #
Example #34
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example

Create layer of 4x3 iaf_psc_alpha neurons, visualize

BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''

import nest
import pylab
import nest.topology as topo

pylab.ion()

nest.ResetKernel()

l1 = topo.CreateLayer({
    'columns': 4,
    'rows': 3,
    'extent': [2.0, 1.5],
    'elements': 'iaf_psc_alpha'
})

nest.PrintNetwork()
nest.PrintNetwork(2)
nest.PrintNetwork(2, l1)

topo.PlotLayer(l1, nodesize=50)
Example #35
# imports and function header reconstructed for this truncated snippet (the standard
# Numba Mandelbrot example); only the loop body and the create_fractal() driver below
# survived the excerpt
from numba import autojit
import numpy as np
from pylab import imshow, jet, ion, show


@autojit
def mandel(x, y, max_iters):
    c = complex(x, y)
    z = 0.0j
    for i in range(max_iters):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i

    return 255


@autojit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
    height = image.shape[0]
    width = image.shape[1]

    pixel_size_x = (max_x - min_x) / width
    pixel_size_y = (max_y - min_y) / height
    for x in range(width):
        real = min_x + x * pixel_size_x
        for y in range(height):
            imag = min_y + y * pixel_size_y
            color = mandel(real, imag, iters)
            image[y, x] = color

    return image


image = np.zeros((1024, 1024), dtype=np.uint8)
#image = np.zeros((500, 750), dtype=np.uint8)
imshow(create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20))
jet()
ion()
show()
Example #36
def orthview(
    map=None,
    fig=None,
    rot=None,
    coord=None,
    unit="",
    xsize=800,
    half_sky=False,
    title="Orthographic view",
    nest=False,
    min=None,
    max=None,
    flip="astro",
    remove_dip=False,
    remove_mono=False,
    gal_cut=0,
    format="%g",
    format2="%g",
    cbar=True,
    cmap=None,
    notext=False,
    norm=None,
    hold=False,
    margins=None,
    sub=None,
    return_projected_map=False,
):
    """Plot a healpix map (given as an array) in Orthographic projection.
    
    Parameters
    ----------
    map : float, array-like or None
      An array containing the map.
      If None, will display a blank map, useful for overplotting.
    fig : int or None, optional
      The figure number to use. Default: create a new figure
    rot : scalar or sequence, optional
      Describe the rotation to apply.
      In the form (lon, lat, psi) (unit: degrees) : the point at
      longitude *lon* and latitude *lat* will be at the center. An additional rotation
      of angle *psi* around this direction is applied.
    coord : sequence of character, optional
      Either one of 'G', 'E' or 'C' to describe the coordinate
      system of the map, or a sequence of 2 of these to rotate
      the map from the first to the second coordinate system.
    half_sky : bool, optional
      Plot only one side of the sphere. Default: False
    unit : str, optional
      A text describing the unit of the data. Default: ''
    xsize : int, optional
      The size of the image. Default: 800
    title : str, optional
      The title of the plot. Default: 'Orthographic view'
    nest : bool, optional
      If True, ordering scheme is NESTED. Default: False (RING)
    min : float, optional
      The minimum range value
    max : float, optional
      The maximum range value
    flip : {'astro', 'geo'}, optional
      Defines the convention of projection : 'astro' (default, east towards left, west towards right)
      or 'geo' (east towards right, west towards left)
    remove_dip : bool, optional
      If :const:`True`, remove the dipole+monopole
    remove_mono : bool, optional
      If :const:`True`, remove the monopole
    gal_cut : float, scalar, optional
      Symmetric galactic cut for the dipole/monopole fit.
      Removes points in latitude range [-gal_cut, +gal_cut]
    format : str, optional
      The format of the scale label. Default: '%g'
    format2 : str, optional
      Format of the pixel value under mouse. Default: '%g'
    cbar : bool, optional
      Display the colorbar. Default: True
    notext : bool, optional
      If True, no text is printed around the map
    norm : {'hist', 'log', None}
      Color normalization, hist= histogram equalized color mapping,
      log= logarithmic color mapping, default: None (linear color mapping)
    hold : bool, optional
      If True, replace the current Axes by an OrthographicAxes.
      use this if you want to have multiple maps on the same
      figure. Default: False
    sub : int, scalar or sequence, optional
      Use only a zone of the current figure (same syntax as subplot).
      Default: None
    margins : None or sequence, optional
      Either None, or a sequence (left,bottom,right,top)
      giving the margins on left,bottom,right and top
      of the axes. Values are relative to figure (0-1).
      Default: None
    return_projected_map : bool
      if True returns the projected map in a 2d numpy array
    
    See Also
    --------
    mollview, gnomview, cartview, azeqview
    """
    # Create the figure
    import pylab

    if not (hold or sub):
        f = pylab.figure(fig, figsize=(8.5, 5.4))
        extent = (0.02, 0.05, 0.96, 0.9)
    elif hold:
        f = pylab.gcf()
        left, bottom, right, top = np.array(f.gca().get_position()).ravel()
        extent = (left, bottom, right - left, top - bottom)
        f.delaxes(f.gca())
    else:  # using subplot syntax
        f = pylab.gcf()
        if hasattr(sub, "__len__"):
            nrows, ncols, idx = sub
        else:
            nrows, ncols, idx = sub // 100, (sub % 100) // 10, (sub % 10)
        if idx < 1 or idx > ncols * nrows:
            raise ValueError("Wrong values for sub: %d, %d, %d" % (nrows, ncols, idx))
        c, r = (idx - 1) % ncols, (idx - 1) // ncols
        if not margins:
            margins = (0.01, 0.0, 0.0, 0.02)
        extent = (
            c * 1. / ncols + margins[0],
            1. - (r + 1) * 1. / nrows + margins[1],
            1. / ncols - margins[2] - margins[0],
            1. / nrows - margins[3] - margins[1],
        )
        extent = (
            extent[0] + margins[0],
            extent[1] + margins[1],
            extent[2] - margins[2] - margins[0],
            extent[3] - margins[3] - margins[1],
        )
        # extent = (c*1./ncols, 1.-(r+1)*1./nrows,1./ncols,1./nrows)
    # f=pylab.figure(fig,figsize=(8.5,5.4))

    # Starting to draw : turn interactive off
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            map = np.zeros(12) + np.inf
            cbar = False
        ax = PA.HpxOrthographicAxes(
            f, extent, coord=coord, rot=rot, format=format2, flipconv=flip
        )
        f.add_axes(ax)
        if remove_dip:
            map = pixelfunc.remove_dipole(
                map, gal_cut=gal_cut, nest=nest, copy=True, verbose=True
            )
        elif remove_mono:
            map = pixelfunc.remove_monopole(
                map, gal_cut=gal_cut, nest=nest, copy=True, verbose=True
            )
        img = ax.projmap(
            map,
            nest=nest,
            xsize=xsize,
            half_sky=half_sky,
            coord=coord,
            vmin=min,
            vmax=max,
            cmap=cmap,
            norm=norm,
        )
        if cbar:
            im = ax.get_images()[0]
            b = im.norm.inverse(np.linspace(0, 1, im.cmap.N + 1))
            v = np.linspace(im.norm.vmin, im.norm.vmax, im.cmap.N)
            if matplotlib.__version__ >= "0.91.0":
                cb = f.colorbar(
                    im,
                    ax=ax,
                    orientation="horizontal",
                    shrink=0.5,
                    aspect=25,
                    ticks=PA.BoundaryLocator(),
                    pad=0.05,
                    fraction=0.1,
                    boundaries=b,
                    values=v,
                    format=format,
                )
            else:
                # for older matplotlib versions, no ax kwarg
                cb = f.colorbar(
                    im,
                    orientation="horizontal",
                    shrink=0.5,
                    aspect=25,
                    ticks=PA.BoundaryLocator(),
                    pad=0.05,
                    fraction=0.1,
                    boundaries=b,
                    values=v,
                    format=format,
                )
            cb.solids.set_rasterized(True)
        ax.set_title(title)
        if not notext:
            ax.text(
                0.86,
                0.05,
                ax.proj.coordsysstr,
                fontsize=14,
                fontweight="bold",
                transform=ax.transAxes,
            )
        if cbar:
            cb.ax.text(
                0.5,
                -1.0,
                unit,
                fontsize=14,
                transform=cb.ax.transAxes,
                ha="center",
                va="center",
            )
        f.sca(ax)
    finally:
        pylab.draw()
        if wasinteractive:
            pylab.ion()
            # pylab.show()
    if return_projected_map:
        return img
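# Hedged usage sketch: in healpy the function above is exposed as hp.orthview; a minimal
# call on a dummy map looks like this.
import numpy as np
import healpy as hp

nside = 32
m = np.arange(hp.nside2npix(nside), dtype=float)
hp.orthview(m, rot=(0.0, 0.0, 0.0), half_sky=True, title="Orthographic view")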
Example #37
def gnomview(
    map=None,
    fig=None,
    rot=None,
    coord=None,
    unit="",
    xsize=200,
    ysize=None,
    reso=1.5,
    title="Gnomonic view",
    nest=False,
    remove_dip=False,
    remove_mono=False,
    gal_cut=0,
    min=None,
    max=None,
    flip="astro",
    format="%.3g",
    cbar=True,
    cmap=None,
    norm=None,
    hold=False,
    sub=None,
    margins=None,
    notext=False,
    return_projected_map=False,
):
    """Plot a healpix map (given as an array) in Gnomonic projection.

    Parameters
    ----------
    map : array-like
      The map to project, supports masked maps, see the `ma` function.
      If None, use a blank map, useful for
      overplotting.
    fig : None or int, optional
      A figure number. Default: None= create a new figure
    rot : scalar or sequence, optional
      Describe the rotation to apply.
      In the form (lon, lat, psi) (unit: degrees) : the point at
      longitude *lon* and latitude *lat* will be at the center. An additional rotation
      of angle *psi* around this direction is applied.
    coord : sequence of character, optional
      Either one of 'G', 'E' or 'C' to describe the coordinate
      system of the map, or a sequence of 2 of these to rotate
      the map from the first to the second coordinate system.
    unit : str, optional
      A text describing the unit of the data. Default: ''
    xsize : int, optional
      The size of the image. Default: 200
    ysize : None or int, optional
      The size of the image. Default: None= xsize
    reso : float, optional
      Resolution (in arcmin). Default: 1.5 arcmin
    title : str, optional
      The title of the plot. Default: 'Gnomonic view'
    nest : bool, optional
      If True, ordering scheme is NESTED. Default: False (RING)
    min : float, scalar, optional
      The minimum range value
    max : float, scalar, optional
      The maximum range value
    flip : {'astro', 'geo'}, optional
      Defines the convention of projection : 'astro' (default, east towards left, west towards right)
      or 'geo' (east towards right, west towards left)
    remove_dip : bool, optional
      If :const:`True`, remove the dipole+monopole
    remove_mono : bool, optional
      If :const:`True`, remove the monopole
    gal_cut : float, scalar, optional
      Symmetric galactic cut for the dipole/monopole fit.
      Removes points in latitude range [-gal_cut, +gal_cut]
    format : str, optional
      The format of the scale label. Default: '%g'
    hold : bool, optional
      If True, replace the current Axes by a MollweideAxes.
      use this if you want to have multiple maps on the same
      figure. Default: False
    sub : int or sequence, optional
      Use only a zone of the current figure (same syntax as subplot).
      Default: None
    margins : None or sequence, optional
      Either None, or a sequence (left,bottom,right,top)
      giving the margins on left,bottom,right and top
      of the axes. Values are relative to figure (0-1).
      Default: None
    notext: bool, optional
      If True: do not add resolution info text. Default=False
    return_projected_map : bool
      if True returns the projected map in a 2d numpy array

    See Also
    --------
    mollview, cartview, orthview, azeqview
    """
    import pylab

    if not (hold or sub):
        f = pylab.figure(fig, figsize=(5.8, 6.4))
        if not margins:
            margins = (0.075, 0.05, 0.075, 0.05)
        extent = (0.0, 0.0, 1.0, 1.0)
    elif hold:
        f = pylab.gcf()
        left, bottom, right, top = np.array(pylab.gca().get_position()).ravel()
        if not margins:
            margins = (0.0, 0.0, 0.0, 0.0)
        extent = (left, bottom, right - left, top - bottom)
        f.delaxes(pylab.gca())
    else:  # using subplot syntax
        f = pylab.gcf()
        if hasattr(sub, "__len__"):
            nrows, ncols, idx = sub
        else:
            nrows, ncols, idx = sub // 100, (sub % 100) // 10, (sub % 10)
        if idx < 1 or idx > ncols * nrows:
            raise ValueError("Wrong values for sub: %d, %d, %d" % (nrows, ncols, idx))
        c, r = (idx - 1) % ncols, (idx - 1) // ncols
        if not margins:
            margins = (0.01, 0.0, 0.0, 0.02)
        extent = (
            c * 1. / ncols + margins[0],
            1. - (r + 1) * 1. / nrows + margins[1],
            1. / ncols - margins[2] - margins[0],
            1. / nrows - margins[3] - margins[1],
        )
    extent = (
        extent[0] + margins[0],
        extent[1] + margins[1],
        extent[2] - margins[2] - margins[0],
        extent[3] - margins[3] - margins[1],
    )
    # f=pylab.figure(fig,figsize=(5.5,6))

    # Starting to draw : turn interactive off
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            map = np.zeros(12) + np.inf
            cbar = False
        map = pixelfunc.ma_to_array(map)
        ax = PA.HpxGnomonicAxes(
            f, extent, coord=coord, rot=rot, format=format, flipconv=flip
        )
        f.add_axes(ax)
        if remove_dip:
            map = pixelfunc.remove_dipole(map, gal_cut=gal_cut, nest=nest, copy=True)
        elif remove_mono:
            map = pixelfunc.remove_monopole(map, gal_cut=gal_cut, nest=nest, copy=True)
        img = ax.projmap(
            map,
            nest=nest,
            coord=coord,
            vmin=min,
            vmax=max,
            xsize=xsize,
            ysize=ysize,
            reso=reso,
            cmap=cmap,
            norm=norm,
        )
        if cbar:
            im = ax.get_images()[0]
            b = im.norm.inverse(np.linspace(0, 1, im.cmap.N + 1))
            v = np.linspace(im.norm.vmin, im.norm.vmax, im.cmap.N)
            if matplotlib.__version__ >= "0.91.0":
                cb = f.colorbar(
                    im,
                    ax=ax,
                    orientation="horizontal",
                    shrink=0.5,
                    aspect=25,
                    ticks=PA.BoundaryLocator(),
                    pad=0.08,
                    fraction=0.1,
                    boundaries=b,
                    values=v,
                    format=format,
                )
            else:
                cb = f.colorbar(
                    im,
                    orientation="horizontal",
                    shrink=0.5,
                    aspect=25,
                    ticks=PA.BoundaryLocator(),
                    pad=0.08,
                    fraction=0.1,
                    boundaries=b,
                    values=v,
                    format=format,
                )
            cb.solids.set_rasterized(True)
        ax.set_title(title)
        if not notext:
            ax.text(
                -0.07,
                0.02,
                "%g '/pix,   %dx%d pix"
                % (
                    ax.proj.arrayinfo["reso"],
                    ax.proj.arrayinfo["xsize"],
                    ax.proj.arrayinfo["ysize"],
                ),
                fontsize=12,
                verticalalignment="bottom",
                transform=ax.transAxes,
                rotation=90,
            )
            ax.text(
                -0.07,
                0.6,
                ax.proj.coordsysstr,
                fontsize=14,
                fontweight="bold",
                rotation=90,
                transform=ax.transAxes,
            )
            lon, lat = np.around(ax.proj.get_center(lonlat=True), ax._coordprec)
            ax.text(
                0.5,
                -0.03,
                "(%g,%g)" % (lon, lat),
                verticalalignment="center",
                horizontalalignment="center",
                transform=ax.transAxes,
            )
        if cbar:
            cb.ax.text(
                1.05,
                0.30,
                unit,
                fontsize=14,
                fontweight="bold",
                transform=cb.ax.transAxes,
                ha="left",
                va="center",
            )
        f.sca(ax)
    finally:
        pylab.draw()
        if wasinteractive:
            pylab.ion()
            # pylab.show()
    if return_projected_map:
        return img
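# Hedged usage sketch for the gnomonic viewer, via healpy's public hp.gnomview wrapper.
import numpy as np
import healpy as hp

nside = 32
m = np.arange(hp.nside2npix(nside), dtype=float)
hp.gnomview(m, rot=(120.0, 45.0), xsize=200, reso=1.5, title="Gnomonic view")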
Example #38
def sofispec1Dredu(files,
                   _interactive,
                   _ext_trace,
                   _dispersionline,
                   _automaticex,
                   _verbose=False):
    # print "LOGX:: Entering `sofispec1Dredu` method/function in %(__file__)s"
    # % globals()
    import re
    import string
    import sys
    import os
    os.environ["PYRAF_BETA_STATUS"] = "1"
    import ntt
    try:
        import pyfits
    except:
        from astropy.io import fits as pyfits

    import numpy as np
    import datetime
    import pylab as pl
    from pyraf import iraf

    dv = ntt.dvex()
    now = datetime.datetime.now()
    datenow = now.strftime('20%y%m%d%H%M')
    MJDtoday = 55927 + (datetime.date.today() -
                        datetime.date(2012, 01, 01)).days
    scal = np.pi / 180.
    hdr0 = ntt.util.readhdr(re.sub('\n', '', files[0]))
    _gain = ntt.util.readkey3(hdr0, 'gain')
    _rdnoise = ntt.util.readkey3(hdr0, 'ron')
    std_sun, rastd_sun, decstd_sun, magstd_sun = ntt.util.readstandard(
        'standard_sofi_sun.txt')
    std_vega, rastd_vega, decstd_vega, magstd_vega = ntt.util.readstandard(
        'standard_sofi_vega.txt')
    std_phot, rastd_phot, decstd_phot, magstd_phot = ntt.util.readstandard(
        'standard_sofi_phot.txt')
    outputfile = []
    objectlist, RA, DEC = {}, {}, {}
    for img in files:
        img = re.sub('\n', '', img)
        hdr = ntt.util.readhdr(img)
        _ra = ntt.util.readkey3(hdr, 'RA')
        _dec = ntt.util.readkey3(hdr, 'DEC')
        _grism = ntt.util.readkey3(hdr, 'grism')
        _filter = ntt.util.readkey3(hdr, 'filter')
        _slit = ntt.util.readkey3(hdr, 'slit')
        cc_sun = np.arccos(
            np.sin(_dec * scal) * np.sin(decstd_sun * scal) +
            np.cos(_dec * scal) * np.cos(decstd_sun * scal) * np.cos(
                (_ra - rastd_sun) * scal)) * ((180 / np.pi) * 3600)
        cc_vega = np.arccos(
            np.sin(_dec * scal) * np.sin(decstd_vega * scal) +
            np.cos(_dec * scal) * np.cos(decstd_vega * scal) * np.cos(
                (_ra - rastd_vega) * scal)) * ((180 / np.pi) * 3600)
        cc_phot = np.arccos(
            np.sin(_dec * scal) * np.sin(decstd_phot * scal) +
            np.cos(_dec * scal) * np.cos(decstd_phot * scal) * np.cos(
                (_ra - rastd_phot) * scal)) * ((180 / np.pi) * 3600)
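        # cc_sun / cc_vega / cc_phot are great-circle separations (spherical law of
        # cosines) between this frame's pointing and each catalogued standard star,
        # converted from radians to arcseconds; the nearest catalogue within 100 arcsec
        # decides whether the frame is treated as a solar analog, Vega-type, or
        # photometric standard, and otherwise as a science object.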
        if min(cc_sun) < 100:
            _type = 'sun'
        elif min(cc_phot) < 100:
            _type = 'stdp'
        elif min(cc_vega) < 100:
            _type = 'vega'
        else:
            _type = 'obj'
        if min(cc_phot) < 100:
            if _verbose:
                print img, 'phot', str(min(cc_phot)), str(
                    std_phot[np.argmin(cc_phot)])
            ntt.util.updateheader(
                img, 0, {
                    'stdname': [std_phot[np.argmin(cc_phot)], ''],
                    'magstd': [float(magstd_phot[np.argmin(cc_phot)]), '']
                })
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_phot[argmin(cc_phot)]),'']})
        elif min(cc_sun) < 100:
            if _verbose:
                print img, 'sun', str(min(cc_sun)), str(
                    std_sun[np.argmin(cc_sun)])
            ntt.util.updateheader(
                img, 0, {
                    'stdname': [std_sun[np.argmin(cc_sun)], ''],
                    'magstd': [float(magstd_sun[np.argmin(cc_sun)]), '']
                })
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_sun[argmin(cc_sun)]),'']})
        elif min(cc_vega) < 100:
            if _verbose:
                print img, 'vega', str(min(cc_vega)), str(
                    std_vega[np.argmin(cc_vega)])
            ntt.util.updateheader(
                img, 0, {
                    'stdname': [std_vega[np.argmin(cc_vega)], ''],
                    'magstd': [float(magstd_vega[np.argmin(cc_vega)]), '']
                })
        # ntt.util.updateheader(img,0,{'magstd':[float(magstd_vega[argmin(cc_vega)]),'']})
        else:
            if _verbose:
                print img, 'object'

        _OBID = (ntt.util.readkey3(hdr, 'esoid'))
        if _type not in objectlist:
            objectlist[_type] = {}
        if _grism not in objectlist[_type]:
            objectlist[_type][_grism] = {}
        if _OBID not in objectlist[_type][_grism]:
            objectlist[_type][_grism][_OBID] = []
        objectlist[_type][_grism][_OBID].append(img)

    if 'stdp' not in objectlist:
        print '### warning: no photometric standard'
    else:
        print '### photometric standard in the list of objects'
    if 'sun' not in objectlist:
        print '### warning: no telluric G standard (sun type)'
    else:
        print '### telluric G standard (sun type) in the list of objects'
    if 'vega' not in objectlist:
        print '### warning: no telluric A standard (vega type)'
    else:
        print '### telluric A standard (vega type) in the list of objects'

    iraf.noao(_doprint=0, Stdout=0)
    iraf.imred(_doprint=0, Stdout=0)
    iraf.specred(_doprint=0, Stdout=0)
    iraf.immatch(_doprint=0, Stdout=0)
    iraf.imutil(_doprint=0, Stdout=0)
    toforget = ['specred.apall', 'specred.transform']
    for t in toforget:
        iraf.unlearn(t)
    iraf.specred.apall.readnoi = _rdnoise
    iraf.specred.apall.gain = _gain
    iraf.specred.dispaxi = 2
    for _type in objectlist:
        for setup in objectlist[_type]:
            for _ID in objectlist[_type][setup]:
                listmerge = objectlist[_type][setup][_ID]
                listmerge = ntt.sortbyJD(listmerge)
                _object = ntt.util.readkey3(ntt.util.readhdr(listmerge[0]),
                                            'object')
                if string.count(_object, '/') or string.count(
                        _object, '.') or string.count(_object, ' '):
                    nameobj = string.split(_object, '/')[0]
                    nameobj = string.split(nameobj, ' ')[0]
                    nameobj = string.split(nameobj, '.')[0]
                else:
                    nameobj = _object
                _date = ntt.util.readkey3(ntt.util.readhdr(listmerge[0]),
                                          'date-night')
                outputimage = nameobj + '_' + _date + \
                    '_' + setup + '_merge_' + str(MJDtoday)
                outputimage = ntt.util.name_duplicate(listmerge[0],
                                                      outputimage, '')
                print '### setup= ', setup, ' name field= ', nameobj, ' merge image= ', outputimage, '\n'
                #################
                #  added to avoid crashing with a single frame
                #  header will not be updated with all info
                #################

                if len(listmerge) == 1:
                    ntt.util.delete(outputimage)
                    iraf.imutil.imcopy(listmerge[0],
                                       output=outputimage,
                                       verbose='no')
                    answ = 'n'
                else:
                    if os.path.isfile(outputimage) and _interactive:
                        answ = raw_input(
                            'combined frame of dithered spectra already created. Do you want to make it again [[y]/n] ? '
                        )
                        if not answ:
                            answ = 'y'
                    else:
                        answ = 'y'
#################
                if answ in ['Yes', 'y', 'Y', 'yes']:
                    if _interactive:
                        automaticmerge = raw_input(
                            '\n### Do you want to try to find the dither between frames automatically [[y]/n]'
                        )
                        if not automaticmerge:
                            automaticmerge = 'yes'
                        elif automaticmerge.lower() in ['y', 'yes']:
                            automaticmerge = 'yes'
                        else:
                            automaticmerge = 'no'
                    else:
                        automaticmerge = 'yes'
                    if automaticmerge == 'yes':
                        offset = 0
                        offsetvec = []
                        _center0 = ntt.sofispec1Ddef.findaperture(
                            listmerge[0], False)
                        _offset0 = ntt.util.readkey3(
                            ntt.util.readhdr(listmerge[0]), 'xcum')
                        print '\n### Try to merge spectra considering their offset along the x axis .......'
                        f = open('_offset', 'w')
                        for img in listmerge:
                            _center = ntt.sofispec1Ddef.findaperture(
                                img, False)
                            _center2 = (
                                float(_center) +
                                (float(_offset0) - float(_center0))) * (-1)
                            _offset = (-1) * \
                                ntt.util.readkey3(
                                    ntt.util.readhdr(img), 'xcum')
                            if abs(_center2 - _offset) >= 20:
                                automaticmerge = 'no'
                                break
                            else:
                                offset3 = _center2
                            offsetvec.append(offset3)
                            line = str(offset3) + '   0\n'
                            f.write(line)
                        f.close()
                    if automaticmerge == 'yes':
                        print '### automatic merge .......... done'
                    else:
                        print '\n### warning: try identification of spectra position in interactive way '
                        offset = 0
                        offsetvec = []
                        _z1, _z2, goon = ntt.util.display_image(
                            listmerge[0], 1, '', '', False)
                        print '\n### find aperture on first frame and use it as reference position of ' \
                              'the spectra (mark with ' + '"' + 'm' + '"' + ')'
                        _center0 = ntt.sofispec1Ddef.findaperture(
                            listmerge[0], True)
                        _offset0 = ntt.util.readkey3(
                            ntt.util.readhdr(listmerge[0]), 'xcum')
                        print '\n### find the aperture on all the spectra frames (mark with ' + '"' + 'm' + '"' + ')'
                        f = open('_offset', 'w')
                        for img in listmerge:
                            print '\n### ', img
                            _z1, _z2, goon = ntt.util.display_image(
                                img, 1, '', '', False)
                            _center = ntt.sofispec1Ddef.findaperture(img, True)
                            _center2 = (
                                float(_center) +
                                (float(_offset0) - float(_center0))) * (-1)
                            _offset = (-1) * \
                                ntt.util.readkey3(
                                    ntt.util.readhdr(img), 'xcum')
                            print '\n### position from  dither header: ' + str(
                                _offset)
                            print '### position identified interactively: ' + str(
                                _center2)
                            offset3 = raw_input(
                                '\n### which is the right position [' +
                                str(_center2) + '] ?')
                            if not offset3:
                                offset3 = _center2
                            else:
                                offset3 = float(offset3)
                            offsetvec.append(offset3)
                            line = str(offset3) + '   0\n'
                            f.write(line)
                        f.close()
                    print offsetvec
                    start = int(max(offsetvec) - min(offsetvec))
                    print start
                    f = open('_goodlist', 'w')
                    print listmerge
                    for img in listmerge:
                        f.write(img + '\n')
                    f.close()
                    ntt.util.delete(outputimage)
                    ntt.util.delete('_output.fits')
                    yy1 = pyfits.open(listmerge[0])[0].data[:, 10]
                    iraf.immatch.imcombine('@_goodlist',
                                           '_output',
                                           combine='sum',
                                           reject='none',
                                           offset='_offset',
                                           masktyp='',
                                           rdnoise=_rdnoise,
                                           gain=_gain,
                                           zero='mode',
                                           Stdout=1)

                    _head = pyfits.open('_output.fits')[0].header
                    if _head['NAXIS1'] < 1024:
                        stop = str(_head['NAXIS1'])
                    else:
                        stop = '1024'

                    iraf.imutil.imcopy('_output[' + str(start) + ':' + stop +
                                       ',*]',
                                       output=outputimage,
                                       verbose='no')

                    print outputimage
                    print len(listmerge)
                    hdr1 = ntt.util.readhdr(outputimage)
                    ntt.util.updateheader(
                        outputimage, 0, {
                            'SINGLEXP':
                            [False, 'TRUE if resulting from single exposure'],
                            'M_EPOCH':
                            [False, 'TRUE if resulting from multiple epochs'],
                            'EXPTIME': [
                                ntt.util.readkey3(hdr1, 'EXPTIME') *
                                len(listmerge),
                                'Total integration time per pixel (s)'
                            ],
                            'TEXPTIME': [
                                float(ntt.util.readkey3(hdr1, 'TEXPTIME')) *
                                len(listmerge),
                                'Total integration time of all exposures (s)'
                            ],
                            'APERTURE': [
                                2.778e-4 * float(
                                    re.sub('long_slit_', '',
                                           ntt.util.readkey3(hdr1, 'slit'))),
                                '[deg] Aperture diameter'
                            ],
                            'NOFFSETS': [2, 'Number of offset positions'],
                            'NUSTEP': [0, 'Number of microstep positions'],
                            'NJITTER': [
                                int(ntt.util.readkey3(hdr1, 'NCOMBINE') / 2),
                                'Number of jitter positions'
                            ]
                        })
                    hdr = ntt.util.readhdr(outputimage)
                    matching = [s for s in hdr.keys() if "IMCMB" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(outputimage,
                                         imcmb,
                                         delete='yes',
                                         update='yes',
                                         verify='no',
                                         Stdout=1)
                    if 'SKYSUB' in hdr.keys():
                        aaa = iraf.hedit(outputimage,
                                         'SKYSUB',
                                         delete='yes',
                                         update='yes',
                                         verify='no',
                                         Stdout=1)

                    mjdend = []
                    mjdstart = []
                    num = 0
                    for img in listmerge:
                        num = num + 1
                        hdrm = ntt.util.readhdr(img)
                        ntt.util.updateheader(
                            outputimage, 0, {
                                'PROV' + str(num): [
                                    ntt.util.readkey3(hdrm, 'ARCFILE'),
                                    'Originating file'
                                ],
                                'TRACE' + str(num): [img, 'Originating file']
                            })
                        mjdend.append(ntt.util.readkey3(hdrm, 'MJD-END'))
                        mjdstart.append(ntt.util.readkey3(hdrm, 'MJD-OBS'))
                    _dateobs = ntt.util.readkey3(
                        ntt.util.readhdr(listmerge[np.argmin(mjdstart)]),
                        'DATE-OBS')

                    _telapse = (max(mjdend) - min(mjdstart)) * \
                        60. * 60 * 24.  # *86400
                    _tmid = (max(mjdend) + min(mjdstart)) / 2

                    _title = str(_tmid)[0:9] + ' ' + str(ntt.util.readkey3(hdr, 'object')) + ' ' + str(
                        ntt.util.readkey3(hdr, 'grism')) + ' ' + \
                        str(ntt.util.readkey3(hdr, 'filter')) + \
                        ' ' + str(ntt.util.readkey3(hdr, 'slit'))
                    ntt.util.updateheader(
                        outputimage, 0, {
                            'MJD-OBS': [min(mjdstart), 'MJD start'],
                            'MJD-END': [max(mjdend), 'MJD end'],
                            'TELAPSE': [_telapse, 'Total elapsed time [s]'],
                            'TMID': [_tmid, '[d] MJD mid exposure'],
                            'TITLE': [_title, 'Dataset title'],
                            'DATE-OBS': [_dateobs, 'Date of observation']
                        })
                    # missing: merge airmass
                else:
                    print '\n### skip making again combined spectrum'
                objectlist[_type][setup][_ID] = [outputimage]
                print '\n### setup= ', setup, ' name field= ', nameobj, ' merge image= ', outputimage, '\n'
                if outputimage not in outputfile:
                    outputfile.append(outputimage)
                ntt.util.updateheader(
                    outputimage, 0,
                    {'FILETYPE': [42116, 'combine 2D spectra frame']})

    if _verbose:
        if 'obj' in objectlist:
            print objectlist['obj']
        if 'stdp' in objectlist:
            print objectlist['stdp']
        if 'sun' in objectlist:
            print objectlist['sun']
        if 'vega' in objectlist:
            print objectlist['vega']

    if 'obj' not in objectlist.keys():
        sys.exit('\n### error: no objects in the list')

    sens = {}
    print '\n############################################\n### extract the spectra  '
    # print objectlist
    for setup in objectlist['obj']:
        reduced = []
        for _ID in objectlist['obj'][setup]:
            for img in objectlist['obj'][setup][_ID]:
                hdr = ntt.util.readhdr(img)
                print '\n### next object\n ', img, ntt.util.readkey3(
                    hdr, 'object')
                _grism = ntt.util.readkey3(hdr, 'grism')
                _exptimeimg = ntt.util.readkey3(hdr, 'exptime')
                _JDimg = ntt.util.readkey3(hdr, 'JD')

                imgex = ntt.util.extractspectrum(img,
                                                 dv,
                                                 _ext_trace,
                                                 _dispersionline,
                                                 _interactive,
                                                 'obj',
                                                 automaticex=_automaticex)
                if imgex not in outputfile:
                    outputfile.append(imgex)
                ntt.util.updateheader(
                    imgex, 0, {
                        'FILETYPE': [42107, 'extracted 1D wave calib'],
                        'PRODCATG': [
                            'SCIENCE.' +
                            ntt.util.readkey3(hdr, 'tech').upper(),
                            'Data product category'
                        ]
                    })
                hdr = ntt.util.readhdr(imgex)
                matching = [s for s in hdr.keys() if "TRACE" in s]
                for imcmb in matching:
                    aaa = iraf.hedit(imgex,
                                     imcmb,
                                     delete='yes',
                                     update='yes',
                                     verify='no',
                                     Stdout=1)
                ntt.util.updateheader(imgex, 0,
                                      {'TRACE1': [img, 'Originating file']})

                if os.path.isfile('database/ap' +
                                  re.sub('_ex.fits', '', imgex)):
                    if 'database/ap' + re.sub('_ex.fits', '',
                                              imgex) not in outputfile:
                        outputfile.append('database/ap' +
                                          re.sub('_ex.fits', '', imgex))

                ###########################   telluric standard   #############
                if 'sun' in objectlist and setup in objectlist['sun']:
                    _type = 'sun'
                elif 'vega' in objectlist and setup in objectlist['vega']:
                    _type = 'vega'
                else:
                    _type = 'none'
                if _type in ['sun', 'vega']:
                    stdref = ntt.__path__[0] + '/standard/fits/' + str(
                        _type) + '.fits'
                    stdvec, airmassvec, JDvec = [], [], []
                    for _ID in objectlist[_type][setup]:
                        for std in objectlist[_type][setup][_ID]:
                            _airmassstd = ntt.util.readkey3(
                                ntt.util.readhdr(std), 'airmass')
                            _JDstd = ntt.util.readkey3(ntt.util.readhdr(std),
                                                       'JD')
                            JDvec.append(abs(_JDstd - _JDimg))
                            stdvec.append(std)
                            airmassvec.append(_airmassstd)
                    stdtelluric = stdvec[np.argmin(JDvec)]
                    _exptimestd = ntt.util.readkey3(
                        ntt.util.readhdr(stdtelluric), 'exptime')
                    _magstd = ntt.util.readkey3(ntt.util.readhdr(stdtelluric),
                                                'magstd')
                    print '\n\n ##### closest standard for telluric correction  #### \n\n'
                    print stdtelluric, airmassvec[np.argmin(JDvec)]
                    stdtelluric_ex = ntt.util.extractspectrum(
                        stdtelluric,
                        dv,
                        False,
                        False,
                        _interactive,
                        'std',
                        automaticex=_automaticex)
                    if stdtelluric_ex not in outputfile:
                        outputfile.append(stdtelluric_ex)
                    ntt.util.updateheader(
                        stdtelluric_ex, 0,
                        {'FILETYPE': [42107, 'extracted 1D wave calib ']})
                    ntt.util.updateheader(
                        stdtelluric_ex, 0, {
                            'PRODCATG': [
                                'SCIENCE.' + ntt.util.readkey3(
                                    ntt.util.readhdr(stdtelluric_ex),
                                    'tech').upper(), 'Data product category'
                            ]
                        })

                    hdr = ntt.util.readhdr(stdtelluric_ex)
                    matching = [s for s in hdr.keys() if "TRACE" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(stdtelluric_ex,
                                         imcmb,
                                         delete='yes',
                                         update='yes',
                                         verify='no',
                                         Stdout=1)
                    ntt.util.updateheader(
                        stdtelluric_ex, 0,
                        {'TRACE1': [stdtelluric, 'Originating file']})
                    ###########################################################
                    #               SN telluric calibration
                    imgf = re.sub('_ex.fits', '_f.fits', imgex)
                    imgf, senstelluric = ntt.sofispec1Ddef.calibrationsofi(
                        imgex, stdtelluric_ex, stdref, imgf, _interactive)
                    if imgf not in outputfile:
                        outputfile.append(imgf)
                    if senstelluric not in outputfile:
                        outputfile.append(senstelluric)
                    ntt.util.updateheader(
                        imgf,
                        0,
                        {
                            'FILETYPE': [42208, '1D wave calib, tell cor.'],
                            #                                                    'SNR': [ntt.util.StoN(imgf, 50),
                            'SNR': [
                                ntt.util.StoN2(imgf, False),
                                'Average signal to noise ratio per pixel'
                            ],
                            'TRACE1': [imgex, 'Originating file'],
                            'ASSON1': [
                                re.sub('_f.fits', '_2df.fits', imgf),
                                'Name of associated file'
                            ],
                            'ASSOC1': [
                                'ANCILLARY.2DSPECTRUM',
                                'Category of associated file'
                            ]
                        })
                    ###########################################################
                    imgd = ntt.efoscspec1Ddef.fluxcalib2d(
                        img, senstelluric)  # flux calibration 2d images
                    ntt.util.updateheader(
                        imgd, 0, {
                            'FILETYPE': [
                                42209,
                                '2D wavelength and flux calibrated spectrum'
                            ]
                        })
                    iraf.hedit(imgd,
                               'PRODCATG',
                               delete='yes',
                               update='yes',
                               verify='no')
                    hdrd = ntt.util.readhdr(imgd)
                    matching = [s for s in hdrd.keys() if "TRACE" in s]
                    for imcmb in matching:
                        aaa = iraf.hedit(imgd,
                                         imcmb,
                                         delete='yes',
                                         update='yes',
                                         verify='no',
                                         Stdout=1)
                    ntt.util.updateheader(
                        imgd, 0, {'TRACE1': [img, 'Originating file']})
                    if imgd not in outputfile:
                        outputfile.append(imgd)
                ###############################################################
                if 'stdp' in objectlist and setup in objectlist['stdp']:
                    print '\n #####  photometric calibration   ######\n '
                    standardfile = []
                    for _ID in objectlist['stdp'][setup]:
                        for stdp in objectlist['stdp'][setup][_ID]:
                            stdp_ex = ntt.util.extractspectrum(
                                stdp,
                                dv,
                                False,
                                _dispersionline,
                                _interactive,
                                'std',
                                automaticex=_automaticex)
                            standardfile.append(stdp_ex)
                            if stdp_ex not in outputfile:
                                outputfile.append(stdp_ex)
                            ntt.util.updateheader(
                                stdp_ex, 0, {
                                    'FILETYPE':
                                    [42107, 'extracted 1D wave calib'],
                                    'TRACE1': [stdp_ex, 'Originating file'],
                                    'PRODCATG': [
                                        'SCIENCE.' + ntt.util.readkey3(
                                            ntt.util.readhdr(stdp_ex),
                                            'tech').upper(),
                                        'Data product category'
                                    ]
                                })
                    print '\n### ', standardfile, ' \n'
                    if len(standardfile) >= 2:
                        standardfile0 = raw_input(
                            'which one do you want to use [' +
                            str(standardfile[0]) + '] ? ')
                        if not standardfile0:
                            standardfile0 = standardfile[0]
                    else:
                        standardfile0 = standardfile[0]
                    print standardfile0
                    stdpf = re.sub('_ex.fits', '_f.fits', standardfile0)
                    stdpf, senstelluric2 = ntt.sofispec1Ddef.calibrationsofi(
                        standardfile0, stdtelluric_ex, stdref, stdpf,
                        _interactive)
                    if stdpf not in outputfile:
                        outputfile.append(stdpf)
                    ntt.util.updateheader(
                        stdpf, 0, {
                            'FILETYPE': [42208, '1D wave calib, tell cor'],
                            'TRACE1': [stdp, 'Originating file']
                        })
                    stdname = ntt.util.readkey3(
                        ntt.util.readhdr(standardfile0), 'stdname')
                    standardfile = ntt.__path__[0] + '/standard/flux/' + stdname
                    xx, yy = ntt.util.ReadAscii2(standardfile)

                    crval1 = pyfits.open(stdpf)[0].header.get('CRVAL1')
                    cd1 = pyfits.open(stdpf)[0].header.get('CD1_1')
                    datastdpf, hdrstdpf = pyfits.getdata(stdpf, 0, header=True)
                    xx1 = np.arange(len(datastdpf[0][0]))
                    aa1 = crval1 + (xx1) * cd1
                    yystd = np.interp(aa1, xx, yy)
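                    # Keep only wavelengths outside the strong telluric absorption bands
                    # (the conditions below exclude roughly 11000-11700, 13000-15150 and
                    # 17800-19600 Angstrom) and inside 10000-24000 Angstrom before fitting
                    # a straight line to the observed/reference flux ratio.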
                    rcut = np.compress(
                        ((aa1 < 13000) | (aa1 > 15150)) &
                        ((11700 < aa1) | (aa1 < 11000)) & (aa1 > 10000) &
                        ((aa1 < 17800) | (aa1 > 19600)) & (aa1 < 24000),
                        datastdpf[0][0] / yystd)
                    aa11 = np.compress(
                        ((aa1 < 13000) | (aa1 > 15150)) &
                        ((11700 < aa1) | (aa1 < 11000)) & (aa1 > 10000) &
                        ((aa1 < 17800) | (aa1 > 19600)) & (aa1 < 24000), aa1)
                    yy1clean = np.interp(aa1, aa11, rcut)
                    aa1 = np.array(aa1)
                    yy1clean = np.array(yy1clean)
                    A = np.ones((len(rcut), 2), dtype=float)
                    A[:, 0] = aa11
                    result = np.linalg.lstsq(A, rcut)  # result=[zero,slope]
                    p = [result[0][1], result[0][0]]
                    yfit = ntt.util.pval(aa1, p)

                    pl.clf()
                    pl.ion()
                    pl.plot(aa1,
                            datastdpf[0][0] / yystd,
                            color='red',
                            label='std')
                    pl.plot(aa1, yfit, color='blue', label='fit')
                    pl.legend(numpoints=1, markerscale=1.5)
                    #   sens function sofi spectra
                    outputsens = 'sens_' + stdpf
                    ntt.util.delete(outputsens)
                    datastdpf[0][0] = yfit
                    pyfits.writeto(outputsens, np.float32(datastdpf), hdrstdpf)
                    #################
                    imgsc = re.sub('_ex.fits', '_sc.fits', imgex)
                    ntt.util.delete(imgsc)
                    crval2 = pyfits.open(imgf)[0].header.get('CRVAL1')
                    cd2 = pyfits.open(imgf)[0].header.get('CD1_1')
                    dataf, hdrf = pyfits.getdata(imgf, 0, header=True)
                    xx2 = np.arange(len(dataf[0][0]))
                    aa2 = crval2 + (xx2) * cd2
                    yyscale = np.interp(aa2, aa1, yfit)

                    dataf[0][0] = dataf[0][0] / yyscale
                    dataf[1][0] = dataf[1][0] / yyscale
                    dataf[2][0] = dataf[2][0] / yyscale
                    dataf[3][0] = dataf[3][0] / yyscale

                    pyfits.writeto(imgsc, np.float32(dataf), hdrf)
                    ntt.util.updateheader(
                        imgsc, 0, {
                            'SENSPHOT': [outputsens, 'sens used to flux cal'],
                            'FILETYPE':
                            [42208, '1D wave,flux calib, tell cor'],
                            'TRACE1': [imgf, 'Originating file']
                        })
                    #                ntt.util.updateheader(imgsc,0,{'FILETYPE':[42208,'1D wave,flux calib, tell cor']})
                    #                ntt.util.updateheader(imgsc,0,{'TRACE1':[imgf,'']})
                    print '\n### flux calibrated spectrum= ', imgf, ' with the standard= ', stdpf
                    if imgsc not in outputfile:
                        outputfile.append(imgsc)
                else:
                    print '\n### photometric calibration not performed \n'

    print '\n### adding keywords for phase 3 ....... '
    reduceddata = ntt.util.rangedata(outputfile)
    f = open(
        'logfile_spec1d_' + str(reduceddata) + '_' + str(datenow) +
        '.raw.list', 'w')
    for img in outputfile:
        if str(img)[-5:] == '.fits':
            hdr = ntt.util.readhdr(img)
            # added for DR2
            if 'NCOMBINE' in hdr:
                _ncomb = ntt.util.readkey3(hdr, 'NCOMBINE')
            else:
                _ncomb = 1.0

            _effron = 12. * \
                (1 / np.sqrt(ntt.util.readkey3(hdr, 'ndit') * _ncomb)) * \
                np.sqrt(np.pi / 2)
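            # Effective readout noise above: a single-read noise of 12 e- scaled down by
            # sqrt(NDIT * NCOMBINE); the sqrt(pi/2) factor presumably accounts for the
            # median-type combination of the individual reads.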

            try:
                ntt.util.phase3header(img)  # phase 3 definitions
                ntt.util.updateheader(
                    img, 0, {
                        'quality': ['Final', ''],
                        'EFFRON':
                        [_effron, 'Effective readout noise per output (e-)']
                    })
                f.write(
                    ntt.util.readkey3(ntt.util.readhdr(img), 'arcfile') + '\n')
            except:
                print 'Warning: ' + img + ' is not a fits file'
    f.close()
    return outputfile, 'logfile_spec1d_' + str(reduceddata) + '_' + str(
        datenow) + '.raw.list'
def drawPlots():
    global plots, theWorld, time, times, num_households

    if plots == None or plots.canvas.manager.window == None:
        plots = PL.figure(2)
        PL.ion()
    PL.figure(2)
    PL.hold(True)
    PL.subplot(4, 3, 1)
    PL.cla()
    tot = theWorld.food_shared_totals
    food_shared_avg = [tot[i] / (i + 1) for i in range(len(tot))]
    PL.plot(food_shared_avg, color='black')
    PL.plot(theWorld.food_shared, color='pink')
    PL.plot(theWorld.brn_sharing, color='brown')
    PL.plot(theWorld.grn_sharing, color='green')
    PL.title("Food shared each year (BRN, GRN, tot) and average")
    PL.hold(True)
    PL.subplot(4, 3, 2)
    PL.cla()
    interval = 20
    step = 0.5
    b = [-interval + step * i for i in range(2 * int(interval / step) + 1)]
    if theWorld.hh_prestige:
        PL.hist(theWorld.hh_prestige, bins=b, color='brown')
        PL.title("hh count by prestige")
    PL.hold(True)
    PL.subplot(4, 3, 3)
    PL.cla()
    PL.plot(theWorld.populations, color='pink')
    PL.plot(theWorld.avg_pop, color='black')
    PL.plot(theWorld.avg_pop_100, color='blue')
    PL.title("Population, average, and 100-year average")
    PL.hold(True)
    PL.subplot(4, 3, 4)
    PL.cla()
    PL.plot(theWorld.avg_ages, color='pink')
    PL.plot(theWorld.avg_adult_ages, color='red')
    PL.plot(theWorld.avg_hh_age, color='black')
    PL.title(
        "Average household(black), forager(pink), and adult forager age (red) at end"
    )
    PL.hold(True)
    PL.subplot(4, 3, 5)
    PL.cla()
    interval = (W.max_founder_kin_span - W.min_founder_kin_span)
    step = 0.1
    b = [(W.min_founder_kin_span + step * i)
         for i in range(int(interval / step) + 1)]
    if theWorld.kinship_spans:
        PL.hist(theWorld.kinship_spans, bins=b, color='blue')
        #     PL.axis()
        PL.title("population count by kinship span")
    PL.hold(True)
    PL.subplot(4, 3, 6)
    PL.cla()
    interval = (W.max_founder_kin_span - W.min_founder_kin_span)
    step = 1
    b = [(W.min_founder_kin_span + step * i)
         for i in range(int(interval / step) + 1)]
    if theWorld.kinship_spans:
        PL.hist(theWorld.kinship_spans, bins=b, color='blue')
        #     PL.axis()
        PL.title("population count by kinship span")
    PL.hold(True)
    PL.subplot(4, 3, 7)
    PL.cla()
    interval = 10
    step = 0.05
    b = [step * i for i in range(int(interval / step) + 1)]
    if theWorld.hh_food_stored:
        PL.hist(theWorld.hh_food_stored, bins=b, color='cyan')
        #     PL.axis()
        PL.title("hh counts by food stored")
    PL.hold(True)
    PL.subplot(4, 3, 8)
    PL.cla()
    interval = max(theWorld.pop_expertise) - min(theWorld.pop_expertise)
    step = 0.05
    b = [
        min(theWorld.pop_expertise) + step * i
        for i in range(int(interval / step) + 1)
    ]
    if theWorld.pop_expertise:
        PL.hist(theWorld.pop_expertise, bins=b, color='cyan')
        #     PL.axis()
        PL.title("population counts by foraging expertise")
    PL.hold(True)
    PL.subplot(4, 3, 9)
    PL.cla()
    PL.plot(theWorld.median_storage, color='black')
    PL.title("Median food stored")
    PL.hold(True)
    PL.subplot(4, 3, 10)
    PL.cla()
    PL.plot(theWorld.hoover, color='black')
    PL.title("Hoover index")
    PL.hold(True)
    PL.subplot(4, 3, 11)
    PL.cla()
    PL.plot(theWorld.avg_food_stored, color='black')
    PL.title("average food stored")
    PL.hold(False)
    plots.tight_layout()
    plots.canvas.manager.window.update()
    PL.figure(1)
Example #40
        drt_l_out = np.array(drt_l_out)

        drt_u_mean.append(np.mean(drt_u_out))
        drt_u_std.append(np.std(drt_u_out))
        drt_l_mean.append(np.mean(drt_l_out))
        drt_l_std.append(np.std(drt_l_out))

    drt_u_mean = np.array(drt_u_mean)
    drt_l_mean = np.array(drt_l_mean)
    drt_u_std = np.array(drt_u_std)
    drt_l_std = np.array(drt_l_std)
    residuals_u = (drt_u_mean - drt_in) / drt_u_std
    residuals_l = (drt_l_mean - drt_in) / drt_l_std

    ##- Setup figures
    plt.ion()
    plt.rcParams.update({'font.size': 14})
    colors = [
        '#396AB1', '#DA7C30', '#3E9651', '#CC2529', '#535154', '#6B4C9A',
        '#922428', '#948B3D'
    ]

    ##- Plot bias figure
    fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 8))
    fig.subplots_adjust(hspace=0.5)
    ax[0].set_title(
        fr'$\Delta r$, 1000000 realisations, rt={rt_in}, rp={rp_in}')
    ax[0].plot(drt_in, drt_in, color=colors[4], ls=':')
    ax[0].errorbar(drt_in,
                   drt_u_mean,
                   yerr=drt_u_std,
def modified_XOR(kernel, degree, C, sdev):
    import svm
    sv = svm.svm(kernel, degree=degree, C=C)
    #sv = svm.svm(kernel='poly',degree=3,C=0.2)
    #sv = svm.svm(kernel='rbf',C=0.1)
    #sv = svm.svm(kernel='poly',degree=3)
    #sdev = 0.4 #0.3 #0.1

    m = 100
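    # Build an XOR-style training set: m points drawn from four Gaussian clusters
    # centred on (0,0), (0,1), (1,0) and (1,1); the clusters at (0,0) and (1,1)
    # are labelled +1, the other two -1.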
    X = sdev * np.random.randn(m, 2)
    X[m / 2:, 0] += 1.
    X[m / 4:m / 2, 1] += 1.
    X[3 * m / 4:, 1] += 1.
    targets = -np.ones((m, 1))
    targets[:m / 4, 0] = 1.
    targets[3 * m / 4:, 0] = 1.
    #targets = (np.where(X[:,0]*X[:,1]>=0,1,-1)*np.ones((1,np.shape(X)[0]))).T

    sv.train_svm(X, targets)

    Y = sdev * np.random.randn(m, 2)
    Y[m / 2:, 0] += 1.
    Y[m / 4:m / 2, 1] += 1.
    Y[3 * m / 4:m, 1] += 1.
    test = -np.ones((m, 1))
    test[:m / 4, 0] = 1.
    test[3 * m / 4:, 0] = 1.

    #test = (np.where(Y[:,0]*Y[:,1]>=0,1,-1)*np.ones((1,np.shape(Y)[0]))).T
    #print test.T
    output = sv.classifier(Y, soft=False)
    #print output.T
    #print test.T
    err1 = np.where((output == 1.) & (test == -1.))[0]
    err2 = np.where((output == -1.) & (test == 1.))[0]
    print kernel, C
    print "Class 1 errors ", len(err1), " from ", len(test[test == 1])
    print "Class 2 errors ", len(err2), " from ", len(test[test == -1])
    print "Test accuracy ", 1. - (float(len(err1) + len(err2))) / (
        len(test[test == 1]) + len(test[test == -1]))

    pl.ion()
    pl.figure()
    l1 = np.where(targets == 1)[0]
    l2 = np.where(targets == -1)[0]
    pl.plot(X[sv.sv, 0], X[sv.sv, 1], 'o', markeredgewidth=5)
    pl.plot(X[l1, 0], X[l1, 1], 'ko')
    pl.plot(X[l2, 0], X[l2, 1], 'wo')
    l1 = np.where(test == 1)[0]
    l2 = np.where(test == -1)[0]
    pl.plot(Y[l1, 0], Y[l1, 1], 'ks')
    pl.plot(Y[l2, 0], Y[l2, 1], 'ws')

    step = 0.1
    f0, f1 = np.meshgrid(
        np.arange(np.min(X[:, 0]) - 0.5,
                  np.max(X[:, 0]) + 0.5, step),
        np.arange(np.min(X[:, 1]) - 0.5,
                  np.max(X[:, 1]) + 0.5, step))

    out = sv.classifier(np.c_[np.ravel(f0), np.ravel(f1)], soft=True).T

    out = out.reshape(f0.shape)
    pl.contour(f0, f1, out, 2)

    pl.axis('off')
    pl.show()
Example #42
def loopEvents(RUNID, boardID, att):
    DISPLAY = 0
    print('DISPLAY = ', DISPLAY)
    pl.ion()
    filename = "C" + RUNID + "_b" + boardID + ".data.txt"
    #datafile = '../data/ulastai/'+filename
    datafile = '/home/martineau/GRAND/GRANDproto35/data/ulastai/' + filename
    print('Scanning', datafile)

    with open(datafile, "r") as f:
        evts = f.read().split('-----------------')

    nevts = len(evts) - 1
    print('Number of events:', nevts)
    time.sleep(1)
    date = []
    board = np.zeros(shape=(np.size(evts)), dtype=np.int32)

    TS2 = np.zeros(shape=(np.size(evts)))
    TS1PPS = np.zeros(shape=(np.size(evts)))
    TS1Trig = np.zeros(shape=(np.size(evts)))
    SSS = np.zeros(shape=(np.size(evts)), dtype=np.int32)
    EvtId = np.zeros(shape=(np.size(evts)), dtype=np.int32)
    TrigPattern = np.zeros(shape=(np.size(evts)))
    imax = np.zeros(shape=(nevts, 3), dtype=int)
    Amax = np.zeros(shape=(nevts, 3))
    mub = np.zeros(shape=(nevts, 3))
    sigb = np.zeros(shape=(nevts, 3))

    j = 0
    # Index for filling the pre-allocated arrays (date & data are appended instead)
    for i in range(1, nevts + 1):
        if i % 100 == 0:
            print('Event', i, '/', nevts)
        evt = evts[i]
        evtsplit = evt.split('\n')
        if np.size(evtsplit) > 8:  # Event is of normal size
            date.append(evtsplit[1])
            IP = evtsplit[2][3:]
            board[j] = int(IP[-2:])

            TS2[j] = int(
                evtsplit[3][4:]
            )  # time elapsed since last PPS (125MHz clock <=> 8ns counter)
            tt = int(evtsplit[4][11:])  # phase in 8ns slot for trigger
            TS1Trig[j] = get_1stone(hex(tt))
            tpps = int(evtsplit[5][7:])
            TS1PPS[j] = get_1stone(hex(tpps))  # phase in 8ns slot for PPS
            SSS[j] = int(evtsplit[6][4:])  # Elapsed seconds since start
            EvtId[j] = int(evtsplit[7][3:])
            TrigPattern[j] = int(evtsplit[8][12:])
            # Data
            raw = evtsplit[9:][:]  #raw data
            raw2 = raw[0].split(" ")  # Cut raw data list into samples
            raw2 = raw2[0:np.size(raw2) - 1]  # Remove last element (empty)
            hraw2 = [hex(int(a)) for a in raw2]  # Transfer back to hexadecimal
            draw = [twos_comp(int(a, 16), 12) for a in hraw2]  #2s complements
            draw = np.array(draw) * 1. / 2048  # in Volts
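            # Samples are 12-bit two's-complement ADC counts; dividing by 2048 (= 2**11)
            # normalises them to a +/-1 full-scale range.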
            nsamples = len(draw) / 4  # Separate data to each channel
            offset = nsamples / 2.0
            thisEvent = np.reshape(draw, (4, nsamples))
            #data.append(thisEvent) # Write to data list ... Not needed here

            if DISPLAY:
                print('Event ', j, 'at date', date[j])
                t = np.array(range(np.shape(thisEvent)[1]))
                t = t * 10e-3  #in mus
                pl.figure(j)
                pl.subplot(311)
                pl.plot(t[3:], thisEvent[0][3:])
                pl.ylabel('Amplitude [LSB]')
                pl.grid(True)
                pl.subplot(312)
                pl.ylabel('Amplitude [LSB]')
                pl.plot(t[3:], thisEvent[1][3:])
                pl.grid(True)
                pl.subplot(313)
                pl.plot(t[3:], thisEvent[2][3:])
                pl.xlabel('Time [mus]')
                pl.ylabel('Amplitude [LSB]')

                pl.grid(True)
                pl.suptitle('Board {0} Event {1}'.format(board[j], EvtId[j]))
                pl.show()
                raw_input()
                pl.close(j)

            for k in [0, 1, 2]:
                nz = np.where(thisEvent[k][:] != 0)
                imax[j, k] = np.argmax(thisEvent[k][:])
                Amax[j, k] = thisEvent[k][imax[j, k]]
                mub[j, k] = np.mean(thisEvent[k][nz])
                sigb[j, k] = np.std(thisEvent[k][nz])
            j = j + 1
        else:
            print('Error! Empty event', i)

    boards = set(board[np.where(board > 0)])
    print('Boards in run:', list(boards))
    j = 0
    m = np.empty([len(boards), 3])
    em = np.empty([len(boards), 3])
    for id in boards:
        sel = np.where(board == id)
        for k in [0, 1, 2]:
            pl.figure(1)
            subpl = 311 + k
            pl.subplot(subpl)
            a = mub[sel, k][0]
            pl.hist(a, 100)
            if k == 0:
                pl.title('Board {0}'.format(id))

            if k == 2:
                pl.xlabel('Mean amplitude')
            pl.grid(True)
            pl.figure(2)
            subpl = 311 + k
            pl.subplot(subpl)
            b = sigb[sel, k][0]
            pl.hist(b, 100)
            if k == 0:
                pl.title('Board {0}'.format(id))
            if k == 2:
                pl.xlabel('Std dev')
            pl.grid(True)

            #Pack up results
            m[j, k] = np.mean(a)
            em[j, k] = np.mean(b)
            print('Channel', k, ': mean=', m[j, k], '; stddev=', em[j, k])

        j = j + 1

    return {'m': m, 'em': em}
Example #43
def spectrum_image(avg_num, direction):

    pylab.ion()

    [Burleigh_WM, Burleigh_WM_bool] = Initialise_Burleigh_WM()
    [SpecAn, SpecAn_Bool] = Initialise_HP8560E_SpecAn()

    #set the spectrum analyser scanning
    HP8560E_SpecAn_Trigger('FREE', 'CONTS', SpecAn)

    [SpecAn_BW, SpecAn_Sweeptime
     ] = HP8560E_SpecAn_Resbandwidth_Sweeptime(resbandwidth, sweeptime, SpecAn)
    [SpecAn_Centre,
     SpecAn_Span] = HP8560E_SpecAn_Centre_Span(centre, span, SpecAn)
    [SpecAn_track, SpecAn_Power] = HP8560E_SpecAn_RF(tracking_gen, RF_power,
                                                     SpecAn)

    #need initial x_axis measurements to generate output array
    x_axis_data = np.arange(centre - span / 2, centre + span / 2, span / 601)

    #load in the offset from InGaAs detector and Modulator response
    amplitude_offset = np.loadtxt(
        "C:\\Users\\Milos\Desktop\\Er_Experiment_Interface_Code\\amplitude_offset.csv",
        delimiter=",")

    compensated_array = np.zeros((avg_num, 601), dtype=float)

    c = 299792458

    Burleigh_WM.write("FETC:SCAL:WAV?")
    laser_wavelength = np.float(Burleigh_WM.read())

    #convert x axis from frequency to wavelength
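    # using dlambda = lambda**2 * dnu / c: with lambda in nm and dnu in Hz,
    # dlambda[nm] = lambda[nm]**2 * dnu[Hz] / (c[m/s] * 1e9), the factor used below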

    if direction == 'pos':
        wavelength_values = x_axis_data * ((laser_wavelength**2) /
                                           (c * 10**9)) + laser_wavelength

        frequency_values = -x_axis_data + (c * 10**9) / laser_wavelength
    else:
        wavelength_values = -x_axis_data * ((laser_wavelength**2) /
                                            (c * 10**9)) + laser_wavelength
        frequency_values = x_axis_data + (c * 10**9) / laser_wavelength

    #now we collect the data trace
    for i in range(avg_num):
        plt.clf()

        #collect data from spectrum analyser
        SpecAn.write("TRA?")
        binary_string = SpecAn.read_raw()
        hex_string = binascii.b2a_hex(binary_string)
        spec_data_temp = np.zeros(601)

        for j in range(601):

            spec_data_temp[j] = int('0x' + hex_string[j * 4:j * 4 + 4], 0)

        compensated_data = np.subtract(spec_data_temp, amplitude_offset)

        compensated_array[i, :] = compensated_data[:]
        plt.plot(wavelength_values, compensated_data)
        plt.pause(0.01)

    #turn off RF so it stops burning
    [SpecAn_track, SpecAn_Power] = HP8560E_SpecAn_RF('OFF', RF_power, SpecAn)

    #average the above runs into one array
    avg_spec_data = np.average(compensated_array, 0)

    #print avg_spec_data

    plt.clf()
    pylab.ioff()

    foldername = 'spin polarisation image'
    filename = 'unpolarised-negative direction'

    if direction == 'pos':

        save_data(filename, foldername, laser_wavelength, avg_spec_data,
                  wavelength_values, frequency_values)
        plt.plot(wavelength_values - 1538, avg_spec_data)
        plt.xlim(wavelength_values[1] - 1538, wavelength_values[-1] - 1538)
        plt.show()

    else:

        save_data(filename, foldername, laser_wavelength,
                  np.flipud(avg_spec_data), np.flipud(wavelength_values),
                  np.flipud(frequency_values))
        plt.plot(np.flipud(wavelength_values) - 1538, np.flipud(avg_spec_data))
        plt.xlim(wavelength_values[-1] - 1538, wavelength_values[1] - 1538)
        plt.show()
#! /usr/bin/env python
# encoding: UTF-8
from astropy.table import Table
import numpy as np
import pylab as pt
from matplotlib.backends.backend_pdf import PdfPages
from gammapy.utils.energy import EnergyBounds
from method_fit import *
from method_plot import *
import yaml
import sys
pt.ion()
"""
./plot_spectra.py "config_crab.yaml"
plot la valeur des differentes composantes utilisees pour le fit morpho
"""

input_param = yaml.load(open(sys.argv[1]))
#Input param fit and source configuration
image_size = input_param["general"]["image_size"]
extraction_region = input_param["param_fit"]["extraction_region"]
freeze_bkg = input_param["param_fit"]["freeze_bkg"]
source_name = input_param["general"]["source_name"]
name_method_fond = input_param["general"]["name_method_fond"]
name = "_region_" + str(extraction_region) + "pix"
if freeze_bkg:
    name += "_bkg_fix"
else:
    name += "_bkg_free"
for_integral_flux = input_param["exposure"]["for_integral_flux"]
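# For reference, a minimal sketch of a config file this script would accept. The key
# names are taken only from the reads above; every value is a placeholder rather than
# anything from the original project.
example_config = {
    "general": {"image_size": 250,
                "source_name": "Crab",
                "name_method_fond": "my_bkg_method"},
    "param_fit": {"extraction_region": 5,
                  "freeze_bkg": True},
    "exposure": {"for_integral_flux": False},
}
# Dumping example_config with yaml.dump() to e.g. "config_crab.yaml" would produce a
# file readable by the yaml.load(...) call above.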
Example #45
for i in xrange(X.size):
    griddata.addSample([X.ravel()[i], Y.ravel()[i]], [0])
griddata._convertToOneOfMany(
)  # this is still needed to make the fnn feel comfy

for i in range(20):
    trainer.trainEpochs(1)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['class'])

    print "epoch: %4d" % trainer.totalepochs, \
          "  train error: %5.2f%%" % trnresult, \
          "  test error: %5.2f%%" % tstresult
    out = fnn.activateOnDataset(griddata)
    out = out.argmax(axis=1)  # the highest output activation gives the class
    out = out.reshape(X.shape)
    figure(1)
    ioff()  # interactive graphics off
    clf()  # clear the plot
    hold(True)  # overplot on
    for c in [0, 1, 2]:
        here, _ = where(tstdata['class'] == c)
        plot(tstdata['input'][here, 0], tstdata['input'][here, 1], 'o')
    if out.max() != out.min():  # safety check against flat field
        contourf(X, Y, out)  # plot the contour
    ion()  # interactive graphics on
    draw()  # update the plot

ioff()
show()
Example #46
    def aux_basic(self, dirname, rc):
        """Helper function -- to assure that all filehandlers
           get closed so we could remove trash directory.

           Otherwise -- .nfs* files on NFS-mounted drives cause problems
           """
        report = rc('UnitTest report',
                    title="Sample report for testing",
                    path=dirname)
        isdummy = isinstance(report, DummyReport)

        verbose.handlers = [report]
        verbose.level = 3
        verbose(1, "Starting")
        verbose(2, "Level 2")

        if not isdummy:
            self.assertTrue(len(report._story) == 2,
                            msg="We should have got some lines from verbose")

        if __debug__:
            odhandlers = debug.handlers
            debug.handlers = [report]
            oactive = debug.active
            debug.active = ['TEST'] + debug.active
            debug('TEST', "Testing report as handler for debug")
            if not isdummy:
                self.assertTrue(len(report._story) == 4,
                            msg="We should have got some lines from debug")
            debug.active = oactive
            debug.handlers = odhandlers

        os.makedirs(dirname)

        if externals.exists('pylab plottable'):
            if not isdummy:
                clen = len(report._story)
            import pylab as pl
            pl.ioff()
            pl.close('all')
            pl.figure()
            pl.plot([1, 2], [3, 2])

            pl.figure()
            pl.plot([2, 10], [3, 2])
            pl.title("Figure 2 must be it")
            report.figures()

            if not isdummy:
                self.assertTrue(
                    len(report._story) == clen+2,
                    msg="We should have got some lines from figures")

        report.text("Dugi bugi")
        # make sure we don't puke on xml like text with crap
        report.text("<kaj>$lkj&*()^$%#%</kaj>")
        report.text("locals:\n%s globals:\n%s" % (`locals()`, `globals()`))
        # bloody XML - just to check that there is no puke
        report.xml("<b>Dugi bugi</b>")
        report.save()

        if externals.exists('pylab'):
            import pylab as pl
            pl.close('all')
            pl.ion()

        pass
Example #47
    def pvhist(self, ns=0, nbins=20):
        print "nsamp", self.nsamp[ns]
        pylab.ion()
        pylab.hist(numpy.array(self.pvs[ns]), nbins)
def kepbls(infile,
           outfile,
           datacol,
           errcol,
           minper,
           maxper,
           mindur,
           maxdur,
           nsearch,
           nbins,
           plot,
           clobber,
           verbose,
           logfile,
           status,
           cmdLine=False):

    # startup parameters

    numpy.seterr(all="ignore")
    status = 0
    labelsize = 32
    ticksize = 18
    xsize = 16
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2

    # log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPBLS -- '
    call += 'infile=' + infile + ' '
    call += 'outfile=' + outfile + ' '
    call += 'datacol=' + str(datacol) + ' '
    call += 'errcol=' + str(errcol) + ' '
    call += 'minper=' + str(minper) + ' '
    call += 'maxper=' + str(maxper) + ' '
    call += 'mindur=' + str(mindur) + ' '
    call += 'maxdur=' + str(maxdur) + ' '
    call += 'nsearch=' + str(nsearch) + ' '
    call += 'nbins=' + str(nbins) + ' '
    plotit = 'n'
    if (plot): plotit = 'y'
    call += 'plot=' + plotit + ' '
    overwrite = 'n'
    if (clobber): overwrite = 'y'
    call += 'clobber=' + overwrite + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    # start time

    kepmsg.clock('KEPBLS started at', logfile, verbose)

    # is duration greater than one bin in the phased light curve?

    if float(nbins) * maxdur / 24.0 / maxper <= 1.0:
        message = 'WARNING -- KEPBLS: ' + str(
            maxdur) + ' hours transit duration < 1 phase bin when P = '
        message += str(maxper) + ' days'
        kepmsg.warn(logfile, message)

# test log file

    logfile = kepmsg.test(logfile)

    # clobber output file

    if clobber: status = kepio.clobber(outfile, logfile, verbose)
    if kepio.fileexists(outfile):
        message = 'ERROR -- KEPBLS: ' + outfile + ' exists. Use clobber=yes'
        status = kepmsg.err(logfile, message, verbose)

# open input file

    if status == 0:
        instr, status = kepio.openfits(infile, 'readonly', logfile, verbose)
    if status == 0:
        tstart, tstop, bjdref, cadence, status = kepio.timekeys(
            instr, infile, logfile, verbose, status)

# fudge non-compliant FITS keywords with no values

    if status == 0:
        instr = kepkey.emptykeys(instr, file, logfile, verbose)

# read table structure

    if status == 0:
        table, status = kepio.readfitstab(infile, instr[1], logfile, verbose)

# filter input data table

    if status == 0:
        work1 = numpy.array(
            [table.field('time'),
             table.field(datacol),
             table.field(errcol)])
        work1 = numpy.rot90(work1, 3)
        work1 = work1[~numpy.isnan(work1).any(1)]

# read table columns

    if status == 0:
        intime = work1[:, 2] + bjdref
        indata = work1[:, 1]
        inerr = work1[:, 0]

# test whether the period range is sensible

    if status == 0:
        tr = intime[-1] - intime[0]
        if maxper > tr:
            message = 'ERROR -- KEPBLS: maxper is larger than the time range of the input data'
            status = kepmsg.err(logfile, message, verbose)

# prepare time series

    if status == 0:
        work1 = intime - intime[0]
        work2 = indata - numpy.mean(indata)

# start period search

    if status == 0:
        srMax = numpy.array([], dtype='float32')
        transitDuration = numpy.array([], dtype='float32')
        transitPhase = numpy.array([], dtype='float32')
        dPeriod = (maxper - minper) / nsearch
        trialPeriods = numpy.arange(minper,
                                    maxper + dPeriod,
                                    dPeriod,
                                    dtype='float32')
        complete = 0
        print ' '
        for trialPeriod in trialPeriods:
            fracComplete = float(complete) / float(len(trialPeriods) -
                                                   1) * 100.0
            txt = '\r'
            txt += 'Trial period = '
            txt += str(int(trialPeriod))
            txt += ' days ['
            txt += str(int(fracComplete))
            txt += '% complete]'
            txt += ' ' * 20
            sys.stdout.write(txt)
            sys.stdout.flush()
            complete += 1
            srMax = numpy.append(srMax, 0.0)
            transitDuration = numpy.append(transitDuration, numpy.nan)
            transitPhase = numpy.append(transitPhase, numpy.nan)
            trialFrequency = 1.0 / trialPeriod

            # minimum and maximum transit durations in quantized phase units

            duration1 = max(int(float(nbins) * mindur / 24.0 / trialPeriod), 2)
            duration2 = max(
                int(float(nbins) * maxdur / 24.0 / trialPeriod) + 1,
                duration1 + 1)

            # 30 minutes in quantized phase units

            halfHour = int(0.02083333 / trialPeriod * nbins + 1)

            # compute folded time series with trial period

            work4 = numpy.zeros((nbins), dtype='float32')
            work5 = numpy.zeros((nbins), dtype='float32')
            phase = numpy.array(
                ((work1 * trialFrequency) -
                 numpy.floor(work1 * trialFrequency)) * float(nbins),
                dtype='int')
            ptuple = numpy.array([phase, work2, inerr])
            ptuple = numpy.rot90(ptuple, 3)
            phsort = numpy.array(sorted(ptuple, key=lambda ph: ph[2]))
            for i in range(nbins):
                elements = numpy.nonzero(phsort[:, 2] == float(i))[0]
                work4[i] = numpy.mean(phsort[elements, 1])
                work5[i] = math.sqrt(
                    numpy.sum(numpy.power(phsort[elements, 0], 2)) /
                    len(elements))

# extend the work arrays beyond nbins by wrapping

            work4 = numpy.append(work4, work4[:duration2])
            work5 = numpy.append(work5, work5[:duration2])

            # calculate weights of folded light curve points

            sigmaSum = numpy.nansum(numpy.power(work5, -2))
            omega = numpy.power(work5, -2) / sigmaSum

            # calculate weighted phased light curve

            s = omega * work4

            # iterate through trial period phase

            for i1 in range(nbins):

                # iterate through transit durations

                for duration in range(duration1, duration2 + 1, int(halfHour)):

                    # calculate maximum signal residue
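                    # sr1 sums the squared weighted fluxes inside the trial transit window
                    # [i1, i2) and sr2 sums the corresponding weights; sr is the signal
                    # residue statistic used to rank this period/phase/duration combination,
                    # and the largest sr over all trials marks the best transit candidate.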

                    i2 = i1 + duration
                    sr1 = numpy.sum(numpy.power(s[i1:i2], 2))
                    sr2 = numpy.sum(omega[i1:i2])
                    sr = math.sqrt(sr1 / (sr2 * (1.0 - sr2)))
                    if sr > srMax[-1]:
                        srMax[-1] = sr
                        transitDuration[-1] = float(duration)
                        transitPhase[-1] = float((i1 + i2) / 2)

# normalize maximum signal residue curve

        bestSr = numpy.max(srMax)
        bestTrial = numpy.nonzero(srMax == bestSr)[0][0]
        srMax /= bestSr
        transitDuration *= trialPeriods / 24.0
        BJD0 = numpy.array(transitPhase * trialPeriods / nbins,
                           dtype='float64') + intime[0] - 2454833.0
        print '\n'

# clean up x-axis unit

    if status == 0:
        ptime = copy(trialPeriods)
        xlab = 'Trial Period (days)'

# clean up y-axis units

    if status == 0:
        pout = copy(srMax)
        ylab = 'Normalized Signal Residue'

        # data limits

        xmin = ptime.min()
        xmax = ptime.max()
        ymin = pout.min()
        ymax = pout.max()
        xr = xmax - xmin
        yr = ymax - ymin
        ptime = insert(ptime, [0], [ptime[0]])
        ptime = append(ptime, [ptime[-1]])
        pout = insert(pout, [0], [0.0])
        pout = append(pout, 0.0)

# plot light curve

    if status == 0 and plot:
        plotLatex = True
        try:
            params = {
                'backend': 'png',
                'axes.linewidth': 2.5,
                'axes.labelsize': labelsize,
                'axes.font': 'sans-serif',
                'axes.fontweight': 'bold',
                'text.fontsize': 12,
                'legend.fontsize': 12,
                'xtick.labelsize': ticksize,
                'ytick.labelsize': ticksize
            }
            rcParams.update(params)
        except:
            plotLatex = False
    if status == 0 and plot:
        pylab.figure(figsize=[xsize, ysize])
        pylab.clf()

        # plot data

        ax = pylab.axes([0.06, 0.10, 0.93, 0.87])

        # force tick labels to be absolute rather than relative

        pylab.gca().xaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))
        pylab.gca().yaxis.set_major_formatter(
            pylab.ScalarFormatter(useOffset=False))

        # rotate y labels by 90 deg

        labels = ax.get_yticklabels()
        pylab.setp(labels, 'rotation', 90)

# plot curve

    if status == 0 and plot:
        pylab.plot(ptime[1:-1],
                   pout[1:-1],
                   color=lcolor,
                   linestyle='-',
                   linewidth=lwidth)
        pylab.fill(ptime, pout, color=fcolor, linewidth=0.0, alpha=falpha)
        pylab.xlabel(xlab, {'color': 'k'})
        pylab.ylabel(ylab, {'color': 'k'})
        pylab.grid()

# plot ranges

    if status == 0 and plot:
        pylab.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
        if ymin >= 0.0:
            pylab.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
        else:
            pylab.ylim(1.0e-10, ymax + yr * 0.01)

# render plot

        if status == 0 and plot:
            if cmdLine:
                pylab.show()
            else:
                pylab.ion()
                pylab.plot([])
                pylab.ioff()

# append new BLS data extension to the output file

    if status == 0:
        col1 = Column(name='PERIOD',
                      format='E',
                      unit='days',
                      array=trialPeriods)
        col2 = Column(name='BJD0',
                      format='D',
                      unit='BJD - 2454833',
                      array=BJD0)
        col3 = Column(name='DURATION',
                      format='E',
                      unit='hours',
                      array=transitDuration)
        col4 = Column(name='SIG_RES', format='E', array=srMax)
        cols = ColDefs([col1, col2, col3, col4])
        instr.append(new_table(cols))
        instr[-1].header.cards['TTYPE1'].comment = 'column title: trial period'
        instr[-1].header.cards[
            'TTYPE2'].comment = 'column title: trial mid-transit zero-point'
        instr[-1].header.cards[
            'TTYPE3'].comment = 'column title: trial transit duration'
        instr[-1].header.cards[
            'TTYPE4'].comment = 'column title: normalized signal residue'
        instr[-1].header.cards['TFORM1'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM2'].comment = 'column type: float64'
        instr[-1].header.cards['TFORM3'].comment = 'column type: float32'
        instr[-1].header.cards['TFORM4'].comment = 'column type: float32'
        instr[-1].header.cards['TUNIT1'].comment = 'column units: days'
        instr[-1].header.cards[
            'TUNIT2'].comment = 'column units: BJD - 2454833'
        instr[-1].header.cards['TUNIT3'].comment = 'column units: hours'
        instr[-1].header.update('EXTNAME', 'BLS', 'extension name')
        instr[-1].header.update('PERIOD', trialPeriods[bestTrial],
                                'most significant trial period [d]')
        instr[-1].header.update('BJD0', BJD0[bestTrial] + 2454833.0,
                                'time of mid-transit [BJD]')
        instr[-1].header.update('TRANSDUR', transitDuration[bestTrial],
                                'transit duration [hours]')
        instr[-1].header.update('SIGNRES', srMax[bestTrial] * bestSr,
                                'maximum signal residue')

# history keyword in output file

    if status == 0:
        status = kepkey.history(call, instr[0], outfile, logfile, verbose)
        instr.writeto(outfile)

# close input file

    if status == 0:
        status = kepio.closefits(instr, logfile, verbose)

# print best trial period results

    if status == 0:
        print('      Best trial period = %.5f days' % trialPeriods[bestTrial])
        print('    Time of mid-transit = BJD %.5f' % (BJD0[bestTrial] +
                                                      2454833.0))
        print('       Transit duration = %.5f hours' %
              transitDuration[bestTrial])
        print(' Maximum signal residue = %.4g \n' % (srMax[bestTrial] * bestSr))

# end time

    if (status == 0):
        message = 'KEPBLS completed at'
    else:
        message = '\nKEPBLS aborted at'
    kepmsg.clock(message, logfile, verbose)
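The 'BLS' extension appended above can be read back to recover both the per-trial columns and the best-fit keywords. A minimal sketch, assuming astropy is available; the file name stands in for the outfile produced by the call:

# Sketch: read back the 'BLS' extension written by the code above.
# 'kepbls_out.fits' is a hypothetical output file name.
from astropy.io import fits

with fits.open('kepbls_out.fits') as hdulist:
    bls = hdulist['BLS']
    print('best trial period [d] :', bls.header['PERIOD'])
    print('mid-transit [BJD]     :', bls.header['BJD0'])
    print('duration [hours]      :', bls.header['TRANSDUR'])
    print('max signal residue    :', bls.header['SIGNRES'])
    trial_periods = bls.data['PERIOD']    # all trial periods
    sig_res = bls.data['SIG_RES']         # normalized signal residue per trial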
Example #49
0
import sys
sys.path.insert(0, '..')
import numpy as np
import pylab as pb
from gp import chgp
from gp import hgp
import kern

pb.ion()

#build a double-hierarchy HGP
#1) construct the data
genenames = ['foo', 'bar', 'baz', 'bex']
Nrep = 5
Ngene = len(genenames)
Nd = [np.random.randint(2, 8) for i in range(Nrep)]
X = [np.random.randn(Ndi, 1) for Ndi in Nd]
[xx.sort(0) for xx in X]
Y = [[
    np.sin(xx) + 0.3 * np.sin(xx + 5 * np.random.rand()) +
    0.5 * np.sin(xx + 10. * gs) + 0.05 * np.random.randn(Ndi, 1)
    for Ndi, xx in zip(Nd, X)
] for g, gs in enumerate([np.random.randn() for i in range(Ngene)])]

#2) construct the pdata
pdata_chgp = np.vstack(
    [np.tile([[str(i)]], (Ndi, 1)) for i, Ndi in enumerate(Nd)])
pdata_rep = np.tile(pdata_chgp, (Ngene, 1))
pdata_gene = np.vstack(
    [np.tile([[str(i)]],
             np.vstack(X).shape) for i in range(Ngene)])
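The label arrays built in step 2 must line up row-for-row with the stacked inputs; a small numpy-only check (reusing the variables defined above) makes the shapes explicit:

# Sketch: verify that the replicate/gene label arrays match the stacked data.
import numpy as np

X_all = np.vstack(X)                                   # (sum(Nd), 1) stacked inputs
assert pdata_chgp.shape[0] == X_all.shape[0]           # one replicate label per point
assert pdata_rep.shape[0] == Ngene * X_all.shape[0]    # replicate labels tiled per gene
assert pdata_gene.shape[0] == Ngene * X_all.shape[0]   # one gene label per point
print(X_all.shape, pdata_chgp.shape, pdata_rep.shape, pdata_gene.shape)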
Example #50
0
from __future__ import absolute_import
import theano
import matplotlib
if 'MACOSX' in matplotlib.get_backend().upper():
    matplotlib.use('TKAgg')
import pylab as py
py.ion()  ## Turn on plot visualization

import gzip, pickle
import numpy as np
from PIL import Image
import cv2
import keras.backend as K
K.set_image_dim_ordering('th')
from keras.layers import Input, merge, TimeDistributed, LSTM, GRU, RepeatVector
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Activation, Reshape
from keras.initializations import normal, identity, he_normal, glorot_normal, glorot_uniform, he_uniform
from keras.layers.normalization import BatchNormalization
import threading


############# Define Data Generators ################
class ImageNoiseDataGenerator(object):
    '''Generate minibatches with
    realtime data augmentation.
    '''
    def __init__(self, corruption_level=0.5):

        self.__dict__.update(locals())
        self.p = corruption_level
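Only the constructor is shown above, so the actual corruption step is not visible here. A hedged sketch of the masking-noise transform that corruption_level presumably controls (the helper name is ours, not part of the class):

# Hypothetical masking-noise step for a denoising setup: zero out a random
# fraction p of the entries in each batch.
import numpy as np

def corrupt_batch(x_batch, p=0.5, rng=np.random):
    """Return a copy of x_batch with a fraction p of entries set to zero."""
    keep_mask = rng.binomial(1, 1.0 - p, size=x_batch.shape)  # 1 = keep, 0 = corrupt
    return x_batch * keep_mask

# usage: x_noisy = corrupt_batch(x_clean, p=ImageNoiseDataGenerator(0.5).p)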
def lsm_matching(patch, lsm_search, pointAdjusted, lsm_buffer, thresh=0.001):
    # source code from Ellen Schwalbe, rewritten for Python
    # x1, y1 of patch (template); x2, y2 of search area (slightly larger)
    add_val = 1

    px = patch.shape[1]
    py = patch.shape[0]
    n = px * py

    dif_patch_lsm_size_x = (lsm_search.shape[1] - patch.shape[1]) / 2
    dif_patch_lsm_size_y = (lsm_search.shape[0] - patch.shape[0]) / 2

    p_shift_ini = pointImg()
    p_shift_ini.x = np.int(lsm_search.shape[1] / 2)
    p_shift_ini.y = np.int(lsm_search.shape[0] / 2)

    #approximation
    U = np.asarray(
        [np.int(lsm_search.shape[1] / 2),
         np.int(lsm_search.shape[0] / 2)],
        dtype=np.float)
    #     #tx, ty, alpha
    #     U = np.asarray([np.int(lsm_search.shape[1]/2), np.int(lsm_search.shape[0]/2),
    #                     np.float(0)], dtype=np.float)

    A = np.zeros((n, U.shape[0]))
    l = np.zeros((n, 1))

    for i in range(100):  # maximum number of iterations

        lsm_search = contrastAdaption(patch, lsm_search)
        lsm_search = brightnessAdaption(patch, lsm_search)

        #calculate gradient at corresponding (adjusting) position U
        count = 0
        img_test_search = np.zeros((lsm_search.shape[0], lsm_search.shape[1]))
        img_test_patch = np.zeros((patch.shape[0], patch.shape[1]))
        for x1 in range(px):
            for y1 in range(py):
                if (U[0] - p_shift_ini.x < -(lsm_buffer + dif_patch_lsm_size_x)
                        or U[0] - p_shift_ini.x >
                        lsm_search.shape[1] + lsm_buffer - 1
                        or U[1] - p_shift_ini.y <
                        -(lsm_buffer + dif_patch_lsm_size_y)
                        or U[1] - p_shift_ini.y >
                        lsm_search.shape[0] + lsm_buffer - 1):
                    print(count, i)
                    print('patch out of search area')
                    return 1 / 0

                x2 = x1 + U[
                    0] - p_shift_ini.x + dif_patch_lsm_size_x  #shift to coordinate system of lsm_search
                y2 = y1 + U[1] - p_shift_ini.y + dif_patch_lsm_size_y
                #                 #rotation and translation
                #                 x2 = x1 * np.cos(U[2]) - y1 * np.sin(U[2]) + U[0]-p_shift_ini.x + dif_patch_lsm_size_x
                #                 y2 = x1 * np.sin(U[2]) + y1 * np.cos(U[2]) + U[1]-p_shift_ini.y + dif_patch_lsm_size_y

                g1 = patch[int(y1), int(x1)]
                g2 = interopolateGreyvalue(lsm_search, x2, y2)

                img_test_patch[y1, x1] = g1
                img_test_search[int(y2), int(x2)] = g2

                plt.ion()

                #translation x
                gx1 = interopolateGreyvalue(lsm_search, x2 - add_val, y2)
                gx2 = interopolateGreyvalue(lsm_search, x2 + add_val, y2)

                #translation y
                gy1 = interopolateGreyvalue(lsm_search, x2, y2 - add_val)
                gy2 = interopolateGreyvalue(lsm_search, x2, y2 + add_val)

                #                 #rotation
                #                 galpha1 = interopolateGreyvalue(lsm_search, x2, y2, 1)
                #                 galpha2 = interopolateGreyvalue(lsm_search, x2, y2, -1)

                plt.close('all')

                if g1 < 0 or g2 < 0 or gx1 < 0 or gy1 < 0 or gx2 < 0 or gy2 < 0:
                    print(count, i)
                    print('error during gradient calculation')
                    return 1 / 0

                l[count] = g2 - g1

                #translation
                A[count, 0] = gx1 - gx2
                A[count, 1] = gy1 - gy2
                #                 #rotation
                #                 A[count, 2] = galpha1-galpha2

                count = count + 1

        #perform adjustment with gradients
        dx_lsm, s0 = adjustmentGradient(A, l)

        #adds corrections to the values of unknowns
        SUM = 0
        for j in range(U.shape[0]):
            U[j] = U[j] + dx_lsm[j]
            SUM = SUM + np.abs(dx_lsm[j])


        # print(SUM, U, dx_lsm)

        # stop the iteration if the sum of corrections is very small
        if SUM < thresh:
            pointAdjusted.x = U[0]
            pointAdjusted.y = U[1]
            pointAdjusted.s0 = s0
            pointAdjusted.usedObserv = n

            return pointAdjusted

    print('adjustment not converging')
    return -1
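adjustmentGradient() is defined elsewhere; the loop above only relies on it returning the parameter corrections dx and an a-posteriori sigma s0. A plausible least-squares implementation, reconstructed under that assumption:

# Sketch (our reconstruction, not the original adjustmentGradient): solve
# A @ dx ~ l in the least-squares sense and report the a-posteriori sigma.
import numpy as np

def adjustment_gradient_sketch(A, l):
    dx, _, _, _ = np.linalg.lstsq(A, l, rcond=None)
    v = A @ dx - l                                  # residuals
    redundancy = A.shape[0] - A.shape[1]            # n observations - u unknowns
    s0 = np.sqrt((v.ravel() @ v.ravel()) / redundancy)
    return dx.ravel(), s0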
Example #52
0
def plotgauge(gaugeno, plotdata, verbose=False):
    #==========================================
    """
    Plot all requested plots for a single gauge from the computation.
    The plots are requested by setting attributes of plotdata
    to ClawPlotFigure objects with plot_type="each_gauge".

    """

    if verbose:
        gaugesoln = plotdata.getgauge(gaugeno)
        print('    Plotting gauge %s  at x = %s, y = %s ... '
              % (gaugeno, gaugesoln.location[0], gaugesoln.location[1]))

    if plotdata.mode() == 'iplotclaw':
        pylab.ion()

    try:
        plotfigure_dict = plotdata.plotfigure_dict
    except:
        print('*** Error in plotgauge: plotdata missing plotfigure_dict')
        print('*** This should not happen')
        return None

    if len(plotfigure_dict) == 0:
        print('*** Warning in plotgauge: plotdata has empty plotfigure_dict')
        print('*** Apparently no figures to plot')

    # initialize current_data containing data that will be passed
    # to beforegauge, aftergauge, afteraxes commands
    current_data = clawdata.ClawData()
    current_data.add_attribute("user", {})  # for user specified attributes
    # to avoid potential conflicts
    current_data.add_attribute('plotdata', plotdata)
    current_data.add_attribute('gaugeno', gaugeno)

    # call beforegauge if present, which might define additional
    # attributes in current_data or otherwise set up plotting for this
    # gauge.

    beforegauge = getattr(plotdata, 'beforegauge', None)
    if beforegauge:
        if isinstance(beforegauge, str):
            # a string to be executed
            exec(beforegauge)
        else:
            # assume it's a function
            try:
                output = beforegauge(current_data)
                if output: current_data = output
            except:
                print('*** Error in beforegauge ***')
                raise

    # iterate over each single plot that makes up this gauge:
    # -------------------------------------------------------

    if plotdata._mode == 'iplotclaw':
        gaugesoln = plotdata.getgauge(gaugeno)
        print('    Plotting Gauge %s  at x = %s, y = %s ... '
              % (gaugeno, gaugesoln.location[0], gaugesoln.location[1]))
        requested_fignos = plotdata.iplotclaw_fignos
    else:
        requested_fignos = plotdata.print_fignos
    plotted_fignos = []

    plotdata = set_show(plotdata)  # set _show attributes for which figures
    # and axes should be shown.

    # loop over figures to appear for this gauge:
    # -------------------------------------------

    for figname in plotdata._fignames:
        plotfigure = plotdata.plotfigure_dict[figname]
        if (not plotfigure._show) or (plotfigure.type != 'each_gauge'):
            continue  # skip to next figure

        figno = plotfigure.figno
        if requested_fignos != 'all':
            if figno not in requested_fignos:
                continue  # skip to next figure

        plotted_fignos.append(figno)

        if 'facecolor' not in plotfigure.kwargs:
            # use Clawpack's default bg color (tan)
            plotfigure.kwargs['facecolor'] = '#ffeebb'

        # create figure and set handle:
        plotfigure._handle = pylab.figure(num=figno, **plotfigure.kwargs)

        pylab.ioff()
        if plotfigure.clf_each_gauge:
            pylab.clf()

        try:
            plotaxes_dict = plotfigure.plotaxes_dict
        except:
            print('*** Error in plotgauge: plotdata missing plotaxes_dict')
            print('*** This should not happen')
            return None

        if (len(plotaxes_dict) == 0) or (len(plotfigure._axesnames) == 0):
            print('*** Warning in plotgauge: plotdata has empty plotaxes_dict')
            print('*** Apparently no axes to plot in figno ', figno)

        # loop over axes to appear on this figure:
        # ----------------------------------------

        for axesname in plotfigure._axesnames:
            plotaxes = plotaxes_dict[axesname]
            if not plotaxes._show:
                continue  # skip this axes if no items show

            # create the axes:
            axescmd = getattr(plotaxes, 'axescmd', 'subplot(1,1,1)')
            axescmd = 'plotaxes._handle = pylab.%s' % axescmd
            exec(axescmd)
            pylab.hold(True)

            # loop over items:
            # ----------------

            for itemname in plotaxes._itemnames:

                plotitem = plotaxes.plotitem_dict[itemname]
                outdir = plotitem.outdir
                if outdir is None:
                    outdir = plotdata.outdir
                gaugesoln = plotdata.getgauge(gaugeno, outdir)

                current_data.add_attribute('gaugesoln', gaugesoln)
                current_data.add_attribute('q', gaugesoln.q)
                current_data.add_attribute('t', gaugesoln.t)

                if plotitem._show:
                    try:
                        output = plotgauge1(gaugesoln, plotitem, current_data)
                        if output: current_data = output
                        if verbose:
                            print('      Plotted  plotitem ', itemname)
                    except:
                        print('*** Error in plotgauge: problem calling plotgauge1')
                        traceback.print_exc()
                        return None

            # end of loop over plotitems

        for itemname in plotaxes._itemnames:
            plotitem = plotaxes.plotitem_dict[itemname]

        pylab.title("%s at gauge %s" % (plotaxes.title, gaugeno))

        # call an afteraxes function if present:
        afteraxes = getattr(plotaxes, 'afteraxes', None)
        if afteraxes:
            if isinstance(afteraxes, str):
                # a string to be executed
                exec(afteraxes)
            else:
                # assume it's a function
                try:
                    current_data.add_attribute("plotaxes", plotaxes)
                    current_data.add_attribute("plotfigure",
                                               plotaxes._plotfigure)
                    output = afteraxes(current_data)
                    if output: current_data = output
                except:
                    print('*** Error in afteraxes ***')
                    raise

        if plotaxes.scaled:
            pylab.axis('scaled')

        # set axes limits:
        if plotaxes.xlimits is not None and not isinstance(plotaxes.xlimits, str):
            try:
                pylab.xlim(plotaxes.xlimits[0], plotaxes.xlimits[1])
            except:
                pass  # let axis be set automatically
        if plotaxes.ylimits is not None and not isinstance(plotaxes.ylimits, str):
            try:
                pylab.ylim(plotaxes.ylimits[0], plotaxes.ylimits[1])
            except:
                pass  # let axis be set automatically

            # end of loop over plotaxes

        # end of loop over plotfigures

    # call an aftergauge function if present:
    aftergauge = getattr(plotdata, 'aftergauge', None)
    if aftergauge:
        if isinstance(aftergauge, str):
            # a string to be executed
            exec(aftergauge)
        else:
            # assume it's a function
            try:
                output = aftergauge(current_data)
                if output: current_data = output
            except:
                print('*** Error in aftergauge ***')
                raise

    if plotdata.mode() == 'iplotclaw':
        pylab.ion()
    for figno in plotted_fignos:
        pylab.figure(figno)
        pylab.draw()

    if verbose:
        print('    Done with plotgauge for gauge %i' % gaugeno)

    # print the figure(s) to file(s) if requested:
    if plotdata.mode() != 'iplotclaw' and plotdata.printfigs:
        # iterate over all figures that are to be printed:
        for figno in plotted_fignos:
            printfig(gaugeno=gaugeno, figno=figno, \
                    format=plotdata.print_format, plotdir=plotdata.plotdir,\
                    verbose=verbose)

    return current_data
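plotgauge() only renders figures whose type is 'each_gauge'. A minimal setplot-style declaration in the usual visclaw idiom is sketched below; the attribute values are assumptions following common Clawpack examples, not taken from this file:

# Hedged sketch: declare a gauge figure the way plotgauge() expects.
def setplot(plotdata):
    plotdata.clearfigures()
    plotfigure = plotdata.new_plotfigure(name='Gauge traces', figno=300,
                                         type='each_gauge')
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge data'
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 0          # which component of q to plot
    plotitem.plotstyle = 'b-'
    return plotdata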
Example #53
0
               0,
               but_h,
               fun=callback.Xfrew)
bfrew = mybut('<<', fig, button_layout_cursor, y0, 0, but_h, fun=callback.frew)
brew = mybut('<', fig, button_layout_cursor, y0, 0, but_h, fun=callback.rew)
bshot = mybut('12345',
              fig,
              button_layout_cursor,
              y0,
              0,
              but_h,
              fun=callback.shot)
bfwd = mybut('>', fig, button_layout_cursor, y0, 0, but_h, fun=callback.fwd)
bffwd = mybut('>>', fig, button_layout_cursor, y0, 0, but_h, fun=callback.ffwd)
bXffwd = mybut('>>>',
               fig,
               button_layout_cursor,
               y0,
               0,
               but_h,
               fun=callback.Xffwd)

if oldinter: pl.ion()

# this is sort of initialisation code, but needed to be in a function
if HaveTix: ShotWid()

callback.redraw()

pl.show()
Example #54
0
    kwargs = dict(nwalkers=args.nwalkers,nburn=args.nburn,
                  nsteps=args.nsteps,nthreads=args.nthreads, sigmaprior=args.sigmaprior)
    samples,sampler = mcmc(vel,velerr,**kwargs)

    mean,std = scipy.stats.norm.fit(vel)
    print('%-05s : %.2f' % ('mean', mean))
    print('%-05s : %.2f' % ('std', std))

    intervals = []
    for i,name in enumerate(PARAMS.keys()):
        peak,[low,high] = peak_interval(samples[name],alpha=alpha)
        print "%-05s : %.2f [%.2f,%.2f]"%(name,peak,low,high)
        intervals.append([low,high])

    if args.plot:
        try:
            import pylab as plt
            fig = plot(samples,intervals,sigma_clip=4)
            plt.ion(); plt.show()

            outfile = os.path.splitext(args.infile)[0]+'.pdf'
            warnings.filterwarnings('ignore')
            plt.savefig(outfile)
            warnings.resetwarnings()

        except ImportError as e:
            msg = '\n ' + str(e)
            msg +='\n Failed to create plot.'
            warnings.warn(msg)
Example #55
0
import scipy as sc
output = sc.test('all', raise_warnings='release')
import pylab as gr
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons
import scipy.io as sio
import sympy as sy
from analysis1 import GetLocalExtrema
gr.ion()

x, p = sy.symbols('x p')
a, pa, r, xr = sy.symbols('a pa r xr')
h = sy.symbols('h')

dpAP = a * (pa - p) + h * (1 - p)
dxAP = r * x * (xr - x) + p * (1 - x)
dp = a * (p - pa)
dx = r * x * (xr - x)

# Stimulus
pars = {
    'timeStart': 0.0,
    'timeMax': 500.0,
    'timeStep': 1e-3,
    'tauX': 75.0,
    'tauP': 75.0,
    'asynX': 1.0,
    'asynP': 0.3,
    'kP': 0.0,
def performFeatureTracking(template_size,
                           search_area,
                           initCooTemplate,
                           templateImage,
                           searchImage,
                           shiftSearchArea,
                           performLSM=True,
                           lsm_buffer=3,
                           thresh=0.001,
                           subpixel=False,
                           plot_result=False):
    #template_size: np.array([template_width, template_height])
    #search_area: np.array([search_area_x_CC, search_area_y_CC])
    #initCooTemplate: np.array([x,y])
    #shiftSearchArea: np.array([shiftFromCenter_x, shiftFromCenter_y])
    template_width = template_size[0]
    template_height = template_size[1]
    search_area_x = search_area[0]
    search_area_y = search_area[1]
    shiftSearchArea_x = shiftSearchArea[0]
    shiftSearchArea_y = shiftSearchArea[1]

    # check if template sizes are even and, if so, make them odd
    if int(template_width) % 2 == 0:
        template_width = template_width + 1
    if int(template_height) % 2 == 0:
        template_height = template_height + 1
    if int(search_area_x) % 2 == 0:
        search_area_x = search_area_x + 1
    if int(search_area_y) % 2 == 0:
        search_area_y = search_area_y + 1

    #get patch clip
    if plot_result:
        plt.imshow(templateImage)
        plt.plot(initCooTemplate[0], initCooTemplate[1], "r.", markersize=10)
        plt.waitforbuttonpress()
        plt.cla()
        plt.close('all')

    try:
        patch, _ = getTemplate(templateImage, initCooTemplate, template_width,
                               template_height, True)
    except Exception as e:
        #        _, _, exc_tb = sys.exc_info()
        #        print(e, 'line ' + str(exc_tb.tb_lineno))
        print('template patch reaches border')
        return 1 / 0

    #shift search area to corresponding position considering movement direction
    templateCoo_init_shift = np.array([
        initCooTemplate[0] + shiftSearchArea_x,
        initCooTemplate[1] + shiftSearchArea_y
    ])

    #get lsm search clip
    try:
        search_area, lowerLeftCoo_lsm_search = getTemplate(
            searchImage, templateCoo_init_shift, search_area_x, search_area_y,
            True)

    except Exception as e:
        #        _, _, exc_tb = sys.exc_info()
        #        print(e, 'line ' + str(exc_tb.tb_lineno))
        print('search patch reaches border')
        return 1 / 0

    if plot_result:
        plt.ion()

    CC_xy = crossCorrelation(search_area, patch, lowerLeftCoo_lsm_search,
                             plot_result, subpixel)
    if CC_xy[0] == -999:
        return 1 / 0

    if plot_result:
        plt.close('all')
        print(CC_xy)

    TrackedFeature = CC_xy

    if performLSM:
        #perform least square matching (subpixel accuracy possible)
        try:
            lsm_search, lowerLeftCoo_lsm_search = getTemplate(
                searchImage, CC_xy, search_area_x, search_area_y, True)
        except Exception as e:
            #            _, _, exc_tb = sys.exc_info()
            #            print(e, 'line ' + str(exc_tb.tb_lineno))
            print('lsm patch reaches border')
            return 1 / 0

        if plot_result:
            plt.imshow(lsm_search)
            plt.waitforbuttonpress()
            plt.close('all')

        pointAdjusted_ = pointAdjusted()

        try:
            result_lsm = lsm_matching(patch, lsm_search, pointAdjusted_,
                                      lsm_buffer, thresh)
            print('sigma LSM tracking: ' + str(result_lsm.s0))

            if plot_result:
                plt.imshow(searchImage, cmap='gray')
                plt.plot(result_lsm.y + lowerLeftCoo_lsm_search[0],
                         result_lsm.x + lowerLeftCoo_lsm_search[1],
                         "b.",
                         markersize=10)
                plt.waitforbuttonpress()
                plt.close('all')

            TrackedFeature = np.asarray([result_lsm.x, result_lsm.y])

        except Exception as e:
            #            _, _, exc_tb = sys.exc_info()
            #            print(e, 'line ' + str(exc_tb.tb_lineno))
            print('lsm failed')

    return TrackedFeature
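A usage sketch for the tracker above; the image names, window sizes and coordinates are placeholders, and cv2/numpy are assumed to be available:

# Hypothetical call to performFeatureTracking() with placeholder inputs.
import cv2
import numpy as np

template_img = cv2.imread('frame_000.png', cv2.IMREAD_GRAYSCALE)
search_img = cv2.imread('frame_001.png', cv2.IMREAD_GRAYSCALE)

tracked = performFeatureTracking(
    template_size=np.array([21, 21]),          # template width, height (pixels)
    search_area=np.array([61, 61]),            # search window width, height
    initCooTemplate=np.array([250.0, 180.0]),  # x, y of the feature in the template image
    templateImage=template_img,
    searchImage=search_img,
    shiftSearchArea=np.array([0, 0]),          # expected shift of the search window
    performLSM=True,
    plot_result=False)
print('tracked feature:', tracked)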
Example #57
0
def main():

    p = ArgumentParser()
    p.add_argument('filename', help='Path to csv file containing the results.')
    p.add_argument('baseline', help='Path to csv file containing the baseline results.')
    p.add_argument('--accuracy', required=1)
    p.add_argument('--runtime', required=1)
    p.add_argument('--data', choices=('train', 'dev'), default='dev')
    #    p.add_argument('--save')

    args = p.parse_args()
    df = pandas.read_csv(args.filename)

    RUNTIME = '%s_new_policy_%s' % (args.data, args.runtime)
    ACCURACY = '%s_new_policy_%s' % (args.data, args.accuracy)
    df.sort_values(RUNTIME, inplace=1, ascending=False)
    [grammar] = df.args_grammar.unique()
    print('Grammar: %s' % grammar)

    assert not df.empty
    print(df[[ACCURACY, RUNTIME, 'tradeoff', 'jobid']])

    rescale = 1 / df[RUNTIME].max()

    ax = pl.figure().add_subplot(111)
    #pl.axes(frameon=0)
    #pl.grid()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)

    B = pandas.read_csv(args.baseline)

    df = df[(df.args_accuracy == args.accuracy)
            & (df.args_runtime == args.runtime)
            & (df.args_classifier == 'LOGISTIC')]

    # filter out points we didn't use from the baseline.
    used = []
    for _, lols in df.iterrows():
        [ix] = B[B.tradeoff == lols.args_initializer_penalty].index
        used.append(ix)
    B = B.loc[used]

    lambda_cone(B[ACCURACY],
                B[RUNTIME] * rescale,
                ax,
                c=c_baseline,
                conesize=0.05)

    lols_front = []

    for _, lols in df.iterrows():

        penalty = lols.args_initializer_penalty
        init = B[B.tradeoff == penalty]
        assert len(init) == 1

        x = float(init[RUNTIME] * rescale)
        y = float(init[ACCURACY])

        tradeoff = lols.tradeoff / rescale

        # baseline vector (target direction)
        arrow(x, y, tradeoff, offset=-0.05, c=c_vec_baseline, ax=ax)

        # baseline point
        ax.scatter([B[RUNTIME] * rescale], [B[ACCURACY]],
                   c=c_vec_baseline,
                   lw=0,
                   s=9)

        # LOLS learning curve (squirt)
        squirt = pandas.read_csv(lols.log)
        ax.scatter(squirt[RUNTIME] * rescale,
                   squirt[ACCURACY],
                   lw=0,
                   c=c_lols,
                   s=8,
                   alpha=.5)

        print('acc iter1: %g init: %g' % (
            squirt[squirt.iteration == 1][ACCURACY], init[ACCURACY]))
        print('run iter1: %g init: %g' % (
            squirt[squirt.iteration == 1][RUNTIME], init[RUNTIME]))

        assert abs(
            float(squirt[squirt.iteration == 1][ACCURACY]) -
            float(init[ACCURACY])) < 1e-3
        assert abs(
            float(squirt[squirt.iteration == 1][RUNTIME]) -
            float(init[RUNTIME])) < 1e-3

        # re-do early stopping
        early_stop = squirt[ACCURACY] - squirt[RUNTIME] * squirt.tradeoff
        lols = squirt.loc[early_stop.idxmax()]
        #        lols = squirt.loc[squirt.iteration.idxmax()]

        if abs(x - lols[RUNTIME] * rescale) + abs(y - lols[ACCURACY]) > 1e-10:
            # LOLS vector
            ax.annotate("",
                        xy=(lols[RUNTIME] * rescale, lols[ACCURACY]),
                        xytext=(x, y),
                        arrowprops=dict(arrowstyle="->",
                                        lw=2,
                                        color=c_vec_lols,
                                        connectionstyle="arc3"))
        else:
            print(colors.yellow % 'no lols vector for this point')
            print(abs(x - lols[RUNTIME] * rescale) + abs(y - lols[ACCURACY]),
                  abs(x - lols[RUNTIME] * rescale), abs(y - lols[ACCURACY]))
            print('early stop iteration', lols.iteration)
            print(early_stop)

        # LOLS vector end point (early stopping)
        ax.scatter([lols[RUNTIME] * rescale], [lols[ACCURACY]],
                   c=c_vec_lols,
                   s=13)

        # show ugly red arrow to the last point.
        if 0:
            last = squirt.loc[squirt.iteration.idxmax()]
            ax.annotate("",
                        xy=(last[RUNTIME] * rescale, last[ACCURACY]),
                        xytext=(x, y),
                        arrowprops=dict(arrowstyle="->",
                                        lw=2,
                                        color='r',
                                        connectionstyle="arc3"))

        lols_front.append([lols[RUNTIME] * rescale, lols[ACCURACY]])

    # LOLS pareto frontier.
    xx, yy = zip(*lols_front)
    show_frontier(xx,
                  yy,
                  c=c_vec_lols,
                  alpha=0.4,
                  zorder=10,
                  interpolation='linear-convex',
                  ax=ax)

    # tick labels.
    xx = B[RUNTIME] * rescale
    #xx = np.linspace(xx.min(), xx.max(), 12)

    if args.runtime == 'pops':
        ax.set_xlabel(r'runtime (avg constituents built)')
    elif args.runtime == 'mask':
        ax.set_xlabel('runtime (avg spans allowed)')
    elif args.runtime == 'pushes':
        ax.set_xlabel('Runtime (Avg $|E|$)')

    pl.xticks(xx, ['%.f' % (x / rescale) for x in xx], rotation=45)

    # show all learning curves.
    if 0:
        pl.figure()
        for _, lols in df.iterrows():
            squirt = pandas.read_csv(lols.log)
            #pl.figure()
            #pl.plot(squirt[RUNTIME]*rescale, squirt[ACCURACY])
            R = squirt[ACCURACY] - squirt.tradeoff * squirt[RUNTIME]
            pl.plot(squirt.iteration, R)

    #xx=B[ACCURACY]
    #pl.yticks(xx, ['%.3f' % (x) for x in xx])

    pl.ion()
    pl.show()

    #    # not ready for prime time because axes limits aren't set.
    #    if args.save:
    #        save = True
    #        if path(args.save).exists():
    #            save = False
    #            print bold % colors.yellow % "File exists (%s)" % args.save
    #            print bold % colors.yellow % "Overwrite existing file [y/N]?",
    #            if raw_input().strip().lower() in ('y','yes'):
    #                save = True
    #        if save:
    #            print bold % colors.yellow % "Saved file %s" % args.save
    #            pl.savefig(args.save)

    t = ['Controlled experiments (dev)']
    [G] = df.args_grammar.unique()
    if 'medium' in G:
        t.append('small grammar')
    elif 'big' in G:
        t.append('big grammar')

    [RO] = df.args_roll_out.unique()
    if 'CP' in RO:
        t.append('$r_{\\textit{CP}}$')
    elif 'DP' in RO:
        t.append('$r_{\\textit{DP}}$')
    elif 'BF' in RO:
        t.append('$r_{\\textit{BF}}$')
    elif 'HY' in RO:
        t.append('$r_{\\textit{HY}}$')

    if args.accuracy == 'expected_recall_avg':
        ax.set_ylabel('accuracy (expected binarized recall)')
    elif args.accuracy == 'evalb_avg':
        ax.set_ylabel('accuracy (avg single-sentence F1)')

    #pl.title(', '.join(t))

    print(B[[ACCURACY, RUNTIME]])

    ax.figure.tight_layout()
    pl.ioff()
    pl.show()
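The early-stopping rule redone above (pick the iteration that maximizes accuracy minus tradeoff times runtime) is easy to isolate. A self-contained pandas sketch with made-up numbers:

# Sketch of the early-stopping selection: maximize
# reward = accuracy - tradeoff * runtime over a learning curve.
import pandas as pd

curve = pd.DataFrame({
    'iteration': [1, 2, 3, 4],
    'accuracy':  [0.80, 0.84, 0.86, 0.85],
    'runtime':   [100., 120., 180., 260.],
})
tradeoff = 0.0004
reward = curve['accuracy'] - tradeoff * curve['runtime']
best = curve.loc[reward.idxmax()]   # row chosen by early stopping
print(best['iteration'], best['accuracy'], best['runtime'])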
Example #58
0
def create_images_png(filename, outfilename='Default'):
    """Creates the original, clean, and mask images in single a PNG.
    Useful for checking how well LACosmic worked.

    You may want to adjust the size of the "cut" images so to
    capture your source star.

    Parameters:
        filename : string
            Name of the original FITS image, including the path.
        outfilename : string, optional
            Name of the outfile PNG.

    Returns:
        nothing

    Outputs:
        PNG file. ``<file rootname>.png`` by default.
        Shows both full frame images and "cut" images of the source.
    """
    pylab.ioff()
    # create page for plots
    page_width = 21.59 / 2
    page_height = 27.94 / 2
    fig = pylab.figure(figsize=(page_width, page_height))

    file_clean = (filename.split('.fits')[0] + '.clean.fits')
    file_mask = (filename.split('.fits')[0] + '.mask.fits')

    scmax = 7000  # scale_max for raw and clean
    scmin = 3  # scale_min for raw and clean

    # Plot the original image
    pylab.subplot(3, 2, 1, aspect='equal')  # 311
    image_orig = fits.open(filename)
    image_orig_ext = image_orig[1].data
    image_orig_scaled = img_scale.log(image_orig_ext, \
                                      scale_min=scmin, \
                                      scale_max=scmax)
    plt_orig = pylab.imshow(image_orig_scaled, aspect='equal')
    pylab.title('Original (SCI)')

    # Plot cut of original image
    pylab.subplot(3, 2, 2, aspect='equal')
    image_orig_cut = img_scale.log(image_orig_ext[175:275,175:275], \
                                   scale_min=scmin, \
                                   scale_max=scmax)
    plt_orig_cut = pylab.imshow(image_orig_cut, aspect='equal')
    pylab.title('Original (SCI)')
    image_orig.close()

    # Plot the mask image
    pylab.subplot(3, 2, 3)  #312
    image_mask = fits.open(file_mask)
    image_mask_ext = image_mask[0].data
    plt_orig = pylab.imshow(image_mask_ext, aspect='equal', vmin=-2,
                            vmax=1)  #-2, -5
    pylab.title('Mask')

    # Plot cut of the mask image
    pylab.subplot(3, 2, 4)
    plt_mask_cut = pylab.imshow(image_mask_ext[175:275,175:275], \
                                aspect='equal', vmin=-2, vmax=1)
    pylab.title('Mask')
    image_mask.close()

    # Plot the LACosmic-cleaned image
    pylab.subplot(3, 2, 5)  #313
    image_clean = fits.open(file_clean)
    image_clean_ext = image_clean[0].data
    image_clean_scaled = img_scale.log(image_clean_ext, \
                                       scale_min=scmin, \
                                       scale_max=scmax)
    plt_clean = pylab.imshow(image_clean_scaled, aspect='equal')
    pylab.title('Clean')

    #Plot cut of the LACosmic-cleaned image
    pylab.subplot(3, 2, 6)
    image_clean_cut = img_scale.log(image_clean_ext[175:275,175:275], \
                                    scale_min=scmin, scale_max=scmax)
    plt_clean_cut = pylab.imshow(image_clean_cut, aspect='equal')
    pylab.title('Clean')
    image_clean.close()

    if outfilename == 'Default':
        pylab.savefig(filename.split('.fits')[0] + '.png')
    else:
        pylab.savefig(outfilename)
    pylab.close()
    pylab.ion()
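Per the docstring, the LACosmic products are expected to sit next to the input image. A hypothetical call (file names are placeholders):

# Hypothetical usage: expects 'jabc01xyz_flt.clean.fits' and
# 'jabc01xyz_flt.mask.fits' to exist alongside the input image.
create_images_png('jabc01xyz_flt.fits')               # writes jabc01xyz_flt.png
create_images_png('jabc01xyz_flt.fits', 'check.png')  # or pick the output name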
Example #59
0
def view_patches_bar(Yr, A, C, b, f, d1, d2, YrA=None, img=None):
    """view spatial and temporal components interactively

     Parameters:
     -----------
     Yr:    np.ndarray
            movie in format pixels (d) x frames (T)

     A:     sparse matrix
                matrix of spatial components (d x K)

     C:     np.ndarray
                matrix of temporal components (K x T)

     b:     np.ndarray
                spatial background (vector of length d)

     f:     np.ndarray
                temporal background (vector of length T)

     d1,d2: np.ndarray
                frame dimensions

     YrA:   np.ndarray
                 ROI filtered residual as it is given from update_temporal_components
                 If not given, then it is computed (K x T)

     img:   np.ndarray
                background image for contour plotting. Default is the image of all spatial components (d1 x d2)

    """

    pl.ion()
    if 'csc_matrix' not in str(type(A)):
        A = csc_matrix(A)
    if 'array' not in str(type(b)):
        b = b.toarray()

    nr, T = C.shape
    nb = f.shape[0]
    nA2 = np.sqrt(np.array(A.power(2).sum(axis=0))).squeeze()

    if YrA is None:
        Y_r = spdiags(old_div(1, nA2), 0, nr, nr) * (A.T.dot(Yr) -
                                                     (A.T.dot(b)).dot(f) -
                                                     (A.T.dot(A)).dot(C)) + C
    else:
        Y_r = YrA + C

    if img is None:
        img = np.reshape(np.array(A.mean(axis=1)), (d1, d2), order='F')

    fig = pl.figure(figsize=(10, 10))

    axcomp = pl.axes([0.05, 0.05, 0.9, 0.03])

    ax1 = pl.axes([0.05, 0.55, 0.4, 0.4])
    ax3 = pl.axes([0.55, 0.55, 0.4, 0.4])
    ax2 = pl.axes([0.05, 0.1, 0.9, 0.4])

    s_comp = Slider(axcomp, 'Component', 0, nr + nb - 1, valinit=0)
    vmax = np.percentile(img, 98)

    def update(val):
        i = np.int(np.round(s_comp.val))
        print(('Component:' + str(i)))

        if i < nr:

            ax1.cla()
            imgtmp = np.reshape(A[:, i].toarray(), (d1, d2), order='F')
            ax1.imshow(imgtmp, interpolation='None', cmap=pl.cm.gray)
            ax1.set_title('Spatial component ' + str(i + 1))
            ax1.axis('off')

            ax2.cla()
            ax2.plot(np.arange(T), Y_r[i], 'c', linewidth=3)
            ax2.plot(np.arange(T), C[i], 'r', linewidth=2)
            ax2.set_title('Temporal component ' + str(i + 1))
            ax2.legend(labels=['Filtered raw data', 'Inferred trace'])

            ax3.cla()
            ax3.imshow(img, interpolation='None', cmap=pl.cm.gray, vmax=vmax)
            imgtmp2 = imgtmp.copy()
            imgtmp2[imgtmp2 == 0] = np.nan
            ax3.imshow(imgtmp2,
                       interpolation='None',
                       alpha=0.5,
                       cmap=pl.cm.hot)
            ax3.axis('off')
        else:
            ax1.cla()
            bkgrnd = np.reshape(b[:, i - nr], (d1, d2), order='F')
            ax1.imshow(bkgrnd, interpolation='None')
            ax1.set_title('Spatial background ' + str(i + 1 - nr))
            ax1.axis('off')

            ax2.cla()
            ax2.plot(np.arange(T), np.squeeze(np.array(f[i - nr, :])))
            ax2.set_title('Temporal background ' + str(i + 1 - nr))

    def arrow_key_image_control(event):

        if event.key == 'left':
            new_val = np.round(s_comp.val - 1)
            if new_val < 0:
                new_val = 0
            s_comp.set_val(new_val)

        elif event.key == 'right':
            new_val = np.round(s_comp.val + 1)
            if new_val > nr + nb - 1:
                new_val = nr + nb - 1
            s_comp.set_val(new_val)
        else:
            pass

    s_comp.on_changed(update)
    s_comp.set_val(0)
    fig.canvas.mpl_connect('key_release_event', arrow_key_image_control)
    pl.show()
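A usage sketch for view_patches_bar() with synthetic shapes matching the docstring (d = d1*d2 pixels, K components, T frames); all data here are random placeholders:

# Hedged usage sketch with random placeholder data.
import numpy as np
from scipy.sparse import csc_matrix

d1, d2, K, T, nb = 40, 50, 5, 200, 1
d = d1 * d2
A = csc_matrix(np.random.rand(d, K))     # spatial components (d x K)
C = np.random.rand(K, T)                 # temporal components (K x T)
b = np.random.rand(d, nb)                # spatial background
f = np.random.rand(nb, T)                # temporal background
Yr = np.asarray(A.dot(C) + b.dot(f))     # movie, pixels x frames
view_patches_bar(Yr, A, C, b, f, d1, d2)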
def mollzoom(map=None,
             fig=None,
             rot=None,
             coord=None,
             unit='',
             xsize=800,
             title='Mollweide view',
             nest=False,
             min=None,
             max=None,
             flip='astro',
             remove_dip=False,
             remove_mono=False,
             gal_cut=0,
             format='%g',
             cmap=None,
             norm=None,
             hold=False,
             margins=None,
             sub=None):
    """Interactive mollweide plot with zoomed gnomview.
    
    Parameters:
    -----------
    map : float, array-like shape (Npix,)
      An array containing the map, 
      supports masked maps, see the `ma` function.
      If None, use a map with inf values (white map), useful for
      overplotting.
    fig : a figure number. 
      Default: create a new figure
    rot : scalar or sequence, optional
      Describe the rotation to apply.
      In the form (lon, lat, psi) (unit: degrees) : the point at
      longitude *lon* and latitude *lat* will be at the center. An additional rotation
      of angle *psi* around this direction is applied.
    coord : sequence of character, optional
      Either one of 'G', 'E' or 'C' to describe the coordinate
      system of the map, or a sequence of 2 of these to rotate
      the map from the first to the second coordinate system.
    unit : str, optional
      A text describing the unit of the data. Default: ''
    xsize : int, optional
      The size of the image. Default: 800
    title : str, optional
      The title of the plot. Default: 'Mollweide view'
    nest : bool, optional
      If True, ordering scheme is NESTED. Default: False (RING)
    min : float, optional
      The minimum range value
    max : float, optional
      The maximum range value
    flip : {'astro', 'geo'}, optional
      Defines the convention of projection : 'astro' (default, east towards left, west towards right)
      or 'geo' (east towards right, west towards left)
    remove_dip : bool, optional
      If :const:`True`, remove the dipole+monopole
    remove_mono : bool, optional
      If :const:`True`, remove the monopole
    gal_cut : float, scalar, optional
      Symmetric galactic cut for the dipole/monopole fit.
      Removes points in latitude range [-gal_cut, +gal_cut]
    format : str, optional
      The format of the scale label. Default: '%g'
    """
    import pylab
    # create the figure (if interactive, it will open the window now)
    f = pylab.figure(fig, figsize=(10.5, 5.4))
    extent = (0.02, 0.25, 0.56, 0.72)
    # Starting to draw : turn interactive off
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            map = np.zeros(12) + np.inf
        map = pixelfunc.ma_to_array(map)
        ax = PA.HpxMollweideAxes(f,
                                 extent,
                                 coord=coord,
                                 rot=rot,
                                 format=format,
                                 flipconv=flip)
        f.add_axes(ax)
        if remove_dip:
            map = pixelfunc.remove_dipole(map,
                                          gal_cut=gal_cut,
                                          nest=nest,
                                          copy=True,
                                          verbose=True)
        elif remove_mono:
            map = pixelfunc.remove_monopole(map,
                                            gal_cut=gal_cut,
                                            nest=nest,
                                            copy=True,
                                            verbose=True)
        ax.projmap(map,
                   nest=nest,
                   xsize=xsize,
                   coord=coord,
                   vmin=min,
                   vmax=max,
                   cmap=cmap,
                   norm=norm)
        im = ax.get_images()[0]
        b = im.norm.inverse(np.linspace(0, 1, im.cmap.N + 1))
        v = np.linspace(im.norm.vmin, im.norm.vmax, im.cmap.N)
        if matplotlib.__version__ >= '0.91.0':
            cb = f.colorbar(ax.get_images()[0],
                            ax=ax,
                            orientation='horizontal',
                            shrink=0.5,
                            aspect=25,
                            ticks=PA.BoundaryLocator(),
                            pad=0.05,
                            fraction=0.1,
                            boundaries=b,
                            values=v)
        else:
            # for older matplotlib versions, no ax kwarg
            cb = f.colorbar(ax.get_images()[0],
                            orientation='horizontal',
                            shrink=0.5,
                            aspect=25,
                            ticks=PA.BoundaryLocator(),
                            pad=0.05,
                            fraction=0.1,
                            boundaries=b,
                            values=v)
        ax.set_title(title)
        ax.text(0.86,
                0.05,
                ax.proj.coordsysstr,
                fontsize=14,
                fontweight='bold',
                transform=ax.transAxes)
        cb.ax.text(1.05,
                   0.30,
                   unit,
                   fontsize=14,
                   fontweight='bold',
                   transform=cb.ax.transAxes,
                   ha='left',
                   va='center')
        f.sca(ax)

        ## Gnomonic axes
        #extent = (0.02,0.25,0.56,0.72)
        g_xsize = 600
        g_reso = 1.
        extent = (0.60, 0.04, 0.38, 0.94)
        g_ax = PA.HpxGnomonicAxes(f,
                                  extent,
                                  coord=coord,
                                  rot=rot,
                                  format=format,
                                  flipconv=flip)
        f.add_axes(g_ax)
        if remove_dip:
            map = pixelfunc.remove_dipole(map,
                                          gal_cut=gal_cut,
                                          nest=nest,
                                          copy=True)
        elif remove_mono:
            map = pixelfunc.remove_monopole(map,
                                            gal_cut=gal_cut,
                                            nest=nest,
                                            copy=True)
        g_ax.projmap(map,
                     nest=nest,
                     coord=coord,
                     vmin=min,
                     vmax=max,
                     xsize=g_xsize,
                     ysize=g_xsize,
                     reso=g_reso,
                     cmap=cmap,
                     norm=norm)
        im = g_ax.get_images()[0]
        b = im.norm.inverse(np.linspace(0, 1, im.cmap.N + 1))
        v = np.linspace(im.norm.vmin, im.norm.vmax, im.cmap.N)
        if matplotlib.__version__ >= '0.91.0':
            cb = f.colorbar(g_ax.get_images()[0],
                            ax=g_ax,
                            orientation='horizontal',
                            shrink=0.5,
                            aspect=25,
                            ticks=PA.BoundaryLocator(),
                            pad=0.08,
                            fraction=0.1,
                            boundaries=b,
                            values=v)
        else:
            cb = f.colorbar(g_ax.get_images()[0],
                            orientation='horizontal',
                            shrink=0.5,
                            aspect=25,
                            ticks=PA.BoundaryLocator(),
                            pad=0.08,
                            fraction=0.1,
                            boundaries=b,
                            values=v)
        g_ax.set_title(title)
        g_ax.text(-0.07,
                  0.02,
                  "%g '/pix,   %dx%d pix" %
                  (g_ax.proj.arrayinfo['reso'], g_ax.proj.arrayinfo['xsize'],
                   g_ax.proj.arrayinfo['ysize']),
                  fontsize=12,
                  verticalalignment='bottom',
                  transform=g_ax.transAxes,
                  rotation=90)
        g_ax.text(-0.07,
                  0.8,
                  g_ax.proj.coordsysstr,
                  fontsize=14,
                  fontweight='bold',
                  rotation=90,
                  transform=g_ax.transAxes)
        lon, lat = np.around(g_ax.proj.get_center(lonlat=True),
                             g_ax._coordprec)
        g_ax.text(0.5,
                  -0.03,
                  'on (%g,%g)' % (lon, lat),
                  verticalalignment='center',
                  horizontalalignment='center',
                  transform=g_ax.transAxes)
        cb.ax.text(1.05,
                   0.30,
                   unit,
                   fontsize=14,
                   fontweight='bold',
                   transform=cb.ax.transAxes,
                   ha='left',
                   va='center')
        # Add graticule info axes
        grat_ax = pylab.axes([0.25, 0.02, 0.22, 0.25])
        grat_ax.axis('off')
        # Add help text
        help_ax = pylab.axes([0.02, 0.02, 0.22, 0.25])
        help_ax.axis('off')
        t = help_ax.transAxes
        help_ax.text(0.1,
                     0.8,
                     'r/t .... zoom out/in',
                     transform=t,
                     va='baseline')
        help_ax.text(0.1,
                     0.65,
                     'p/v .... print coord/val',
                     transform=t,
                     va='baseline')
        help_ax.text(0.1,
                     0.5,
                     'c ...... go to center',
                     transform=t,
                     va='baseline')
        help_ax.text(0.1,
                     0.35,
                     'f ...... next color scale',
                     transform=t,
                     va='baseline')
        help_ax.text(0.1,
                     0.2,
                     'k ...... save current scale',
                     transform=t,
                     va='baseline')
        help_ax.text(0.1,
                     0.05,
                     'g ...... toggle graticule',
                     transform=t,
                     va='baseline')
        f.sca(g_ax)
        # Set up the zoom capability
        zt = ZoomTool(map,
                      fig=f.number,
                      nest=nest,
                      cmap=cmap,
                      norm=norm,
                      coord=coord)
    finally:
        pylab.draw()
        if wasinteractive:
            pylab.ion()
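A hedged usage example with a synthetic HEALPix map; healpy is assumed for the pixel helper and the map values are random:

# Sketch: call mollzoom() on a random nside=32 HEALPix map.
import numpy as np
import healpy as hp

nside = 32
m = np.random.randn(hp.nside2npix(nside))   # synthetic full-sky map
mollzoom(m, coord='G', unit='arbitrary', title='Random map', min=-3, max=3)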