Example #1
def save_spectrogram(notes, filename):
    window_size = 512
    overlap = 512/2

    num_buffers = int(len(notes)/window_size)*2
    window = scipy.signal.blackman(window_size)

    data = []
    for i in xrange(num_buffers):
        start = i * overlap
        end = start + window_size

        note_data = notes[start:end]

        if len(note_data) < window_size:
            break

        db = db_spectrum(note_data, window)
        
        data.append(db)

    data = np.transpose(np.array(data))

    pl.imshow(data, origin='lower', aspect='auto', interpolation='nearest')
    pl.suptitle(filename)
    pl.ylabel('Frequency (log)')
    pl.xlabel('Time (bins)')

    pl.savefig(filename)
    pl.show()
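The snippet assumes a db_spectrum helper that converts a windowed buffer into a decibel-scale spectrum. A minimal sketch of such a helper (the name follows the snippet; the implementation and the dB floor are assumptions) might look like:

import numpy as np

def db_spectrum(samples, window, floor_db=-120.0):
    # Magnitude spectrum of the windowed buffer, converted to decibels.
    spectrum = np.abs(np.fft.rfft(samples * window))
    return np.maximum(20.0 * np.log10(np.maximum(spectrum, 1e-12)), floor_db)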
Example #2
 def plotmodel(tractor, sig1, tt):
     mod = tractor.getModelImage(0)
     data = tractor.getImage(0).getImage()
     noise = np.random.normal(size=data.shape) * sig1
     imchi = dict(interpolation='nearest', origin='lower', cmap='RdBu',
                  vmin=-3, vmax=3)
     # 'ima' is used below but was defined elsewhere in the original module;
     # a minimal assumed placeholder so the snippet is self-contained:
     ima = dict(interpolation='nearest', origin='lower')
     plt.clf()
     plt.subplot(2,2,1)
     plt.imshow(data, **ima)
     plt.title('Image')
     plt.xticks([]); plt.yticks([])
     plt.subplot(2,2,2)
     plt.imshow(mod, **ima)
     plt.title('Model')
     plt.xticks([]); plt.yticks([])
     plt.subplot(2,2,3)
     plt.imshow(mod+noise, **ima)
     plt.title('Model + noise')
     plt.xticks([]); plt.yticks([])
     plt.subplot(2,2,4)
     # show mod - img to match "red-to-blue" colormap.
     plt.imshow(-(data - mod)/sig1, **imchi)
     plt.xticks([]); plt.yticks([])
     plt.title('Chi')
     plt.suptitle(tt)
     ps.savefig()
Example #3
def sim_results(obs, modes, stars, model, data):


    synth = model.generate_data(modes)
    synth_stats = model.summary_stats(synth)
    obs_stats = model.summary_stats(obs)


    f = plt.figure(figsize=(15,3))
    plt.suptitle('Obs Cand.:{}; Sim Cand.:{}'.format(obs.size, synth.size))
    plt.rc('legend', fontsize='xx-small', frameon=False)
    plt.subplot(121)
    bins = opt_bin(obs_stats[0],synth_stats[0])
    plt.hist(obs_stats[0], bins=bins, histtype='step', label='Data', lw=2)
    plt.hist(synth_stats[0], bins=bins, histtype='step', label='Simulation', lw=2)
    plt.xlabel(r'$\xi$')
    plt.legend()

    plt.subplot(122)
    bins = opt_bin(obs_stats[1],synth_stats[1])
    plt.hist(obs_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
                                          1),
             histtype='step', label='Data', log=True, lw=2)
    plt.hist(synth_stats[1], bins=np.arange(bins.min()-0.5, bins.max()+1.5,
                                            1),
             histtype='step', label='Simulation', log=True, lw=2)
    plt.xlabel(r'$N_p$')
    plt.legend()
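Both histogram calls above rely on an opt_bin helper that is not shown. A minimal sketch (the name follows the snippet; this version simply asks numpy to pick common bin edges over the combined samples) could be:

import numpy as np

def opt_bin(a, b):
    # Common bin edges covering both samples, chosen automatically by numpy.
    return np.histogram_bin_edges(np.concatenate([a, b]), bins='auto')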
Example #4
def transmission(path, g, src, dest):
    global disp_count
    global bar_colors
    global edge_colors
    global node_colors
    k = 0
    j = 0
    list_of_edges = g.edges()
    for node in path:
        k = path.index(node)
        disp_count = disp_count + 1
        if k != (len(path) - 1):
            k = path[k + 1]
            j = list_of_edges.index((node, k))
            initialize_edge_colors(j)
            #ec[disp_count].remove(-3000)
            pylab.subplot(121)
            nx.draw_networkx(g, pos=nx.circular_layout(g), node_color=node_colors, edge_color=edge_colors)
            pylab.annotate("Source", node_positions[src])
            pylab.annotate("Destination", node_positions[dest])
            pylab.title("Transmission")

            he = initializeEnergies(disp_count)
            print he
            pylab.subplot(122)
            pylab.bar(left=[1,2,3,4,5], height=[300,300,300,300,300], width=0.5, color=['w','w','w','w','w'], linewidth=0)
            pylab.bar(left=[1,2,3,4,5], height=initializeEnergies(disp_count), width=0.5, color='b')
            pylab.title("Node energies")
            #pylab.legend(["already passed through", "passing", "yet to pass"])
            pylab.xlabel('Node number')
            pylab.ylabel('Energy')
            pylab.suptitle('Leach Protocol', fontsize=12)
            pylab.pause(2)
        else:
            return
Example #5
    def log_posterior(self,theta):

        model_g1 , model_g2, limit_mask , _ , _ = self.draw_model(theta)

        likelihood = self.log_likelihood(model_g1,model_g2,limit_mask)
        prior = self.log_prior(theta)
        if not np.isfinite(prior):
            posterior = -np.inf
        else:
            # use no info from prior for now
            posterior = likelihood 

        if logger.level == logging.DEBUG:
            n_progress = 10
        elif logger.level == logging.INFO:
            n_progress = 1000
        else:
            n_progress = 1000  # fallback so n_progress is always defined
        if self.n_model_evals % n_progress == 0:

            logger.info('%7d post=% 2.8e like=% 2.8e prior=% 2.4e M200=% 6.3e ' % (self.n_model_evals,posterior,likelihood,prior,theta[0]))

        if np.isnan(posterior):
            import pdb; pdb.set_trace()

        if self.save_all_models:

            self.plot_residual_g1g2(model_g1,model_g2,limit_mask)

            pl.suptitle('model post=% 10.8e M200=%5.2e' % (posterior,theta[0]) )
            filename_fig = 'models/res2.%04d.png' % self.n_model_evals
            pl.savefig(filename_fig)
            logger.debug('saved %s' % filename_fig)
            pl.close()


        return posterior
Example #6
def main():
    base_path = "/caps2/tsupinie/1kmf-control/"
    temp = goshen_1km_temporal(start=14400, end=14400)
    grid = goshen_1km_grid()
    n_ens_members = 40

    np.seterr(all='ignore')

    ens = loadEnsemble(base_path, [ 11 ], temp.getTimes(), ([ 'pt', 'p' ], computeDensity))
    ens = ens[0, 0]

    zs = decompressVariable(nio.open_file("%s/ena001.hdfgrdbas" % base_path, mode='r', format='hdf').variables['zp'])
    xs, ys = grid.getXY()
    xs = xs[np.newaxis, ...].repeat(zs.shape[0], axis=0)
    ys = ys[np.newaxis, ...].repeat(zs.shape[0], axis=0)

    eff_buoy = effectiveBuoyancy(ens, (zs, ys, xs), plane={'z':10})
    print eff_buoy

    pylab.figure()
    pylab.contourf(xs[0], ys[0], eff_buoy[0], cmap=matplotlib.cm.get_cmap('RdBu_r'))
    pylab.colorbar()

    grid.drawPolitical()

    pylab.suptitle("Effective Buoyancy")
    pylab.savefig("eff_buoy.png")
    pylab.close()
    return
Example #7
def group_plots(ylist, ncols, x = None,
                titles = None,
                suptitle = None,
                ylabels = None,
                figsize = None,
                sameyscale = True,
                order='C',
                imkw={}):
    import pylab as pl
    nrows = np.ceil(len(ylist)/float(ncols))
    figsize = ifnot(figsize, (2*ncols,2*nrows))
    fh, axs = pl.subplots(int(nrows), int(ncols),
                          sharex=True,
                          sharey=bool(sameyscale),
                          figsize=figsize)
    ymin,ymax = data_range(ylist)
    axlist = axs.ravel(order=order)
    for i,f in enumerate(ylist):
        x1 = ifnot(x, range(len(f)))
        _im = axlist[i].plot(x1,f,**imkw)
        if titles is not None:
            pl.setp(axlist[i], title = titles[i])
        if ylabels is not None:
            pl.setp(axlist[i], ylabel=ylabels[i])
    if suptitle:
        pl.suptitle(suptitle)
    return
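group_plots depends on two small helpers, ifnot and data_range, that are not included here. Plausible minimal versions (assumptions, not the original implementations) are:

import numpy as np

def ifnot(value, fallback):
    # Return value unless it is None, in which case return the fallback.
    return fallback if value is None else value

def data_range(ylist):
    # Global (min, max) over a list of 1-D arrays.
    return min(np.min(y) for y in ylist), max(np.max(y) for y in ylist)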
Example #8
def animation_compare(filename1, filename2, prefix, tend):
    num = 0
    for t in timestep(0,tend):
        t1,s1 = FieldInitializer.LoadState(filename1, t)
        t2,s2 = FieldInitializer.LoadState(filename2, t)
        rot1 = s1.CalculateRotationRodrigues()['z']
        rot2 = s2.CalculateRotationRodrigues()['z']

        pylab.figure(figsize=(5.12*2,6.12))
        rho = s2.CalculateRhoFourier().modulus()
        rot = s2.CalculateRotationRodrigues()['z']
        pylab.subplot(121)
        #pylab.imshow(rho*(rho>0.5), alpha=1, cmap=pylab.cm.bone_r)
        pylab.imshow(rot1)
        pylab.xticks([])
        pylab.yticks([])
        pylab.xlabel(r'$d_0$', fontsize=25)
        pylab.subplot(122)
        pylab.imshow(rot2)
        pylab.xticks([])
        pylab.yticks([])
        pylab.subplots_adjust(0.,0.,1.,1.,0.01,0.05)
        pylab.xlabel(r'$d_0/2$', fontsize=25)
        pylab.suptitle("Nabarro-Herring", fontsize=25)
        pylab.savefig("%s%04i.png" %(prefix, num))
        pylab.close('all')
        num = num + 1
Example #9
    def plot_bases(self, autoscale=True, stampsize=None):
        import pylab as plt

        N = len(self.psfbases)
        cols = int(np.ceil(np.sqrt(N)))
        rows = int(np.ceil(N / float(cols)))
        plt.clf()
        plt.subplots_adjust(hspace=0, wspace=0)

        cut = 0
        if stampsize is not None:
            H, W = self.shape
            assert H == W
            cut = max(0, (H - stampsize) / 2)

        ima = dict(interpolation="nearest", origin="lower")
        if autoscale:
            mx = self.psfbases.max()
            ima.update(vmin=-mx, vmax=mx)
        nil, xpows, ypows = self.polynomials(0.0, 0.0, powers=True)
        for i, (xp, yp, b) in enumerate(zip(xpows, ypows, self.psfbases)):
            plt.subplot(rows, cols, i + 1)

            if cut > 0:
                b = b[cut:-cut, cut:-cut]
            if autoscale:
                plt.imshow(b, **ima)
            else:
                mx = np.abs(b).max()
                plt.imshow(b, vmin=-mx, vmax=mx, **ima)
            plt.xticks([])
            plt.yticks([])
            plt.title("x^%i y^%i" % (xp, yp))
        plt.suptitle("PsfEx eigen-bases")
Example #10
def visualize_reduced_results(methods, combine_output, title="", plot_fn=None):
        """
        set up plots: T1-error, ROC, PRC
        """

        t0 = time.time()

        fig = pylab.figure()
        fig.set_size_inches(26,7)
        for mi, method in enumerate(methods):
            o = combine_output[mi]
            pylab.subplot(131)
            gw.draw_t1err_curve(o["t1err"][0], o["t1err"][1], method, o["num_trials"])

            pylab.subplot(132)
            draw_roc_curve(o["roc"][0], o["roc"][1], o["roc"][2], method)

            pylab.subplot(133)
            gw.draw_prc_curve(o["prc"][0], o["prc"][1], o["prc"][2], method)


        pylab.suptitle(title)

        print "time taken to draw figure", time.time()-t0

        if plot_fn is None:
            print "showing figure!"
            pylab.show()
        else:
            print "saving figure!"
            pylab.savefig(plot_fn, dpi=100)
Example #11
def plot_weights(init_som, final_som, title=['SOM init', 'SOM final'], dim_lab=None):
	'''
	Function to plot neural weights before and after the training for each dimension.
	'''	
	
	assert init_som.shape == final_som.shape
	
	n, d = init_som.shape
	width = np.int(np.sqrt(n))
	
	if dim_lab is None:
		dim_lab = ['w' + str(i) for i in xrange(d)]

	fig = plt.figure()
	
	for lab, i in zip(dim_lab, xrange(d)):
			
		# plot weights before training
		plt.suptitle(title[0], fontsize = 14)
		ax = fig.add_subplot(2, d, i+1)
		img = init_som[:, i].reshape(width, width)
		ax.imshow(img, interpolation='nearest')
		plt.title(lab)

		# same weights after training

		ax = fig.add_subplot(2, d, (i+1) + d)
		if i==int(d/2.): plt.title(title[1])
		img_f = final_som[:, i].reshape(width, width)
		ax.imshow(img_f, interpolation='nearest')
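A hypothetical usage sketch (Python 2, to match the xrange calls in the snippet, and assuming matplotlib.pyplot is imported as plt): random "before" and "after" weights for a 10x10 map with three features.

import numpy as np
import matplotlib.pyplot as plt

init_som = np.random.rand(100, 3)
final_som = init_som + 0.1 * np.random.randn(100, 3)
plot_weights(init_som, final_som, dim_lab=['R', 'G', 'B'])
plt.show()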
Example #12
    def plotGeometry(self,SNP,PCAD,title):
        fig=plt.figure(figsize=self.figsize, dpi=self.dpi);plt.ioff()
        SNP.loc['Reference0']=np.zeros(SNP.shape[1],dtype=int)
        plt.subplot(1,2,1)
        D=pd.DataFrame(None,index=SNP.index,columns=SNP.index)
        for i in SNP.index:
            for j in SNP.index:
                D.loc[i,j]= sum(np.logical_xor(SNP.loc[i],SNP.loc[j]))
        D=D.astype(float)
        im=plt.imshow(D,interpolation='nearest',cmap='Reds')
        plt.gca().xaxis.tick_top()
        x=np.arange(D.index.shape[0])
        plt.yticks(x,map(lambda x: x.replace('mdio','') ,D.index.values))
        plt.xticks(x,map(lambda x: x.replace('mdio','') ,D.columns))
        plt.colorbar(im)
        plt.gca().tick_params(axis='both', which='major', labelsize=10)
        plt.title('Pairwise Hamming Distance',y=1.03)

        
        plt.subplot(1,2,2)  # second panel (subplot indices are 1-based)
        D=PCAD.astype(float)
        im=plt.imshow(D,interpolation='nearest',cmap='Reds')
        plt.gca().xaxis.tick_top()
        x=np.arange(D.index.shape[0])
        plt.yticks(x,map(lambda x: x.replace('mdio','') ,D.index.values))
        plt.xticks(x,map(lambda x: x.replace('mdio','') ,D.columns))
        plt.colorbar(im)
        plt.gca().tick_params(axis='both', which='major', labelsize=10)
        plt.title('Euclidean Distance in PC3',y=1.03)
        plt.suptitle('Figure {}. {}'.format(self.fignumber, title),fontsize=self.titleSizeSup); self.pdf.savefig(fig);self.fignumber+=1
Example #13
def _show_plots(target, fitted, wt, wo, corrected):
    tau_NP, tau_P, attenuator, rate = target
    tau_NP_f, tau_P_f, attenuation_f, rate_f = fitted

    # Plot the results
    sim_pars = (r'Sim $\tau_{NP}=%g\,{\rm %s}$,  $\tau_{P}=%g\,{\rm %s}$,  ${\rm attenuator}=%g$'
               )%(tau_NP, DEADTIME_UNITS, tau_P, DEADTIME_UNITS, attenuator)
    fit_pars = (r'Fit $\tau_{NP}=%s$,  $\tau_P=%s$,  ${\rm attenuator}=%.2f$'
               )%(
                   ("%.2f"%tau_NP_f[0] if np.inf > tau_NP_f[1] > 0 else "-"),
                   ("%.2f"%tau_P_f[0] if np.inf > tau_P_f[1] > 0 else "-"),
                   1./attenuation_f[0],
               )
    title = '\n'.join((sim_pars, fit_pars))
    import pylab
    pylab.subplot(211)
    #pylab.errorbar(rate, rate_f[0], yerr=rate_f[1], fmt='c.', label='fitted rate')
    #mincident = np.linspace(rate[0], rate[-1], 400)
    #munattenuated = expected_rate(mincident, tau_NP_f[0], tau_P_f[0])
    #mattenuated = expected_rate(mincident/attenuator, tau_NP_f[0], tau_P_f[0])
    #minc = np.hstack((mincident, 0., mincident))
    #mobs = np.hstack((munattenuated, np.NaN, mattenuated))
    #pylab.plot(minc, mobs, 'c-', label='expected rate')
    pylab.errorbar(rate, uval(corrected), yerr=udev(corrected), fmt='r.', label='corrected rate')
    _show_rates(rate, wo, wt, attenuator, tau_NP_f[0], tau_P_f[0])
    pylab.subplot(212)
    _show_droop(rate, wo, wt, attenuator)
    pylab.suptitle(title)

    #pylab.figure(); _show_inversion(wo, tau_P_f, tau_NP_f)
    pylab.show()
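The errorbar call above uses uval and udev to split uncertainty-carrying values into nominal values and standard deviations. Assuming corrected is a sequence of uncertainties.ufloat-like objects, minimal sketches would be:

import numpy as np

def uval(values):
    # Nominal values of a sequence of ufloat-like objects.
    return np.array([v.nominal_value for v in values])

def udev(values):
    # Standard deviations of a sequence of ufloat-like objects.
    return np.array([v.std_dev for v in values])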
Example #14
def simulationDelayedTreatment():
    """
    Runs simulations and makes histograms for problem 5.
    Runs multiple simulations to show the relationship between delayed treatment
    and patient outcome.
    Histograms of final total virus populations are displayed for delays of 300,
    150, 75, 0 timesteps (followed by an additional 150 timesteps of
    simulation).    
    """
    histBins = [i*50 for i in range(11)]
    delays = [0, 75, 150, 300]
    subplot = 1
    for d in delays:
        results = []
        for t in xrange(NUM_TRIALS):
            results.append(runSimulation(d))
        pylab.subplot(2, 2, subplot)
        subplot += 1
        pylab.hist(results, bins=histBins, label='delayed ' + str(d))
        pylab.xlim(0, 500)
        pylab.ylim(0, NUM_TRIALS)
        pylab.ylabel('number of patients')
        pylab.xlabel('total virus population')
        pylab.title(str(d) + ' time step delay')
        popStd = numpy.std(results)
        popMean = numpy.mean(results)
        ## print str(d)+' step delay standard deviation: '+str(popStd)
        ## print str(d)+' step mean: '+str(popMean)
        ## print str(d)+' step CV: '+str(popStd / popMean)
        ## print str(d) + ' step delay: ' + str(results)
    pylab.suptitle('Patient virus populations after 150 time steps when ' +\
                   'prescription\n' +\
                   'is applied after delays of 0, 75, 150, 300 time steps')
    pylab.show()    
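runSimulation and NUM_TRIALS come from the problem set this code targets and are not shown. Hypothetical toy stand-ins such as the following are enough to exercise the plotting code:

import numpy

NUM_TRIALS = 100

def runSimulation(delay):
    # Toy stand-in: longer treatment delays tend to leave larger final virus populations.
    return max(0.0, numpy.random.normal(loc=min(450.0, 50.0 + 1.2 * delay), scale=40.0))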
Example #15
def get_cav(c_mat,nchan,scaling=False):
        #Compute Cav by taking diags, averaging and reforming the matrix
        diags =[c_mat.diagonal(count) for count in xrange(nchan-1, -nchan,-1)]
        cav=n.zeros_like(c_mat)

        for count,count_chan in enumerate(range(nchan-1,-nchan,-1)):
                cav += n.diagflat( n.mean(diags[count]).repeat(len(diags[count])), count_chan)

        if scaling:
            temp=cav.copy()
            for count in xrange(nchan):
                cav[count,:] *= n.sqrt(c_mat[count,count]/temp[count,count])
                cav[:,count] *= n.sqrt(c_mat[count,count]/temp[count,count])

        if not n.allclose(cav.T.conj(),cav):
            print 'Cav is not Hermitian'
            print 'bl'

        if PLOT and False:
            p.subplot(131); capo.arp.waterfall(c_mat,mode='real',mx=3,drng=6);
            p.title('C')
            p.subplot(132); capo.arp.waterfall(cav,mode='real',mx=3,drng=6); p.colorbar()
            p.title('Cav')
            p.subplot(133); capo.arp.waterfall(diff,mode='real',mx=500,drng=500); p.colorbar()
            p.title('Abs Difference')
            p.suptitle('%d_%d'%a.miriad.bl2ij(bl))
            p.show()

        return cav
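A hypothetical usage sketch (Python 2, matching the snippet's print statements and xrange): build a random Hermitian covariance and average its diagonals. PLOT is the module-level flag the function expects to exist.

import numpy as n

PLOT = False
nchan = 8
x = n.random.randn(nchan, 200)
c_mat = n.dot(x, x.conj().T) / 200.  # Hermitian sample covariance
cav = get_cav(c_mat, nchan, scaling=True)
print cav.shape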
Example #16
def fuzzy_arcs_equal(arcs0, arcs1, allowed_error_multiplier):
  # Compare two sets of arcs that should be roughly the same shape up to noise introduced by quantization and inexact constructions
  # allowed_error_multiplier should be in units of round trip error for quantization->offset->unquantization
  # Actual errors appear to be substantially higher than expected in many cases and I'm not sure why!
  combined = Nested.concatenate(arcs0, arcs1)
  quantization_scale = circle_arc_quantization_unit(combined)
  error_per_opp = quantization_scale*(circle_arc_max_quantization_error_guess()+circle_arc_max_offset_error_guess())
  # xor arcs to get difference
  delta = split_arcs_by_parity(combined)
  # We might have thin features around edges, but a small negative offset should erase everything
  max_error = (allowed_error_multiplier+1)*error_per_opp
  squeezed_delta = offset_arcs(delta,-max_error) # Add additional margin for error during offsetting
  similar = (len(squeezed_delta) == 0)
  area_error = abs(circle_arc_area(arcs0) - circle_arc_area(arcs1))
  # Sanity check that similar arcs have similar area
  # In theory area_error tolerance should grow proportional to max_error*arcs_perimeter_length, but a constant works for current usage
  if similar and not area_error < 3e-5:
    if 0:
      import pylab
      pylab.suptitle("area_error: %s" % area_error)
      subplot_arcs(combined, 121, "Combined", full=False)
      subplot_arcs(delta, 122, "delta", full=False)
      pylab.show()
    assert False
  return similar
Example #17
def check_negative_offsets(test_arcs):
  d = 0.4
  # Offset inward then outward would normally erode sharp features, but we can use a positive offset to generate a shape with no sharp features
  outer = offset_arcs(test_arcs, d*1.5) # Ensure features big enough to not disappear if we inset/offset again
  inset = offset_arcs(outer, -d)
  reset = offset_arcs(inset, d)
  outer_area = circle_arc_area(outer)
  inset_area = circle_arc_area(inset)
  reset_area = circle_arc_area(reset)
  assert inset_area < outer_area # Offset by negative amount should reduce area
  if not fuzzy_arcs_equal(outer, reset, 2):
    if 0:
      import pylab
      area_error = abs(outer_area - reset_area)
      pylab.suptitle("Offset: %s, Area error:%s" % (sub_d, area_error))
      print "outer_area:",outer_area
      print "inset_area:",inset_area
      print "reset_area:",reset_area
      subplot_arcs(outer, 131, "outer", full=False)
      subplot_arcs(inset, 132, "inset", full=False)
      subplot_arcs(reset, 133, "reset", full=False)
      pylab.show()
    assert False
  # Check that a large negative offset leaves nothing
  empty_arcs = offset_arcs(test_arcs, -100.)
  assert len(empty_arcs) == 0
Example #18
	def histogram(self):
		# a method of the class to draw the histogram for the given year
		plt.figure()
		self.income_df.hist(column = 'Income', by = 'Region', figsize = (11.5, 8.5), bins = 20, xrot = 25, sharex = True, sharey = True)				
		pl.suptitle('Distribution of income in ' + str(self.year))
		plt.savefig('Histogram in '+ str(self.year) +'.pdf')
		plt.clf()
Example #19
def check_offset_error(arcs):
  quantization_unit = circle_arc_quantization_unit(arcs)
  # Offset should introduce errors bounded by a small constant number of quantization units plus some floating point rounding errors
  # I think the guaranteed error bound is probably higher than this, but I haven't found a test case where this fails
  error_per_offset = 5.*quantization_unit

  def thickness_ok(arcs, expected, max_error):
    thickness_lo = expected - max_error
    thickness_hi = expected + max_error
    return len(offset_arcs(arcs, -thickness_lo)) > 0 \
     and len(offset_arcs(arcs, -thickness_hi)) == 0

  # Find starting thickness
  starting_thickness = find_thickness(arcs,quantization_unit)
  # Offset inward by a random fraction of the starting thickness
  delta = -starting_thickness*random.uniform(0.1,0.9)
  new_arcs = offset_arcs(arcs,delta)

  expected_new_thickness = starting_thickness + delta
  if not thickness_ok(new_arcs, expected_new_thickness, error_per_offset):
    new_thickness = find_thickness(new_arcs, quantization_unit)
    error = abs(new_thickness - expected_new_thickness)
    print "Starting thickness:",starting_thickness
    print "New thickness:",new_thickness
    print "Expected new thickness:",expected_new_thickness
    error_msg = "For delta of %s, resulting error = %s (%s quantization units)" % (delta, error, error/quantization_unit)
    if 0:
      import pylab
      pylab.suptitle(error_msg)
      subplot_arcs(arcs, 121, "Initial", full=False)
      subplot_arcs(new_arcs, 122, "Offset", full=False)
      pylab.show()
    assert False
Example #20
    def createBasisGraph(self, data):
        plt.figure(self.nFig)
        plt.suptitle('Basis')
        nBase = data.shape[1]
        # Number of subplot columns, derived from the number of bases
        nSubCols = nBase / 10
        if nSubCols > 0:
            if nBase % 2 == 0:
                nSubRows = nBase / nSubCols
            else:
                nSubRows = nBase / nSubCols + 1
        else:
            nSubRows = nBase 
            nSubCols = 1
        # freqList = np.fft.fftfreq(513, d = 1.0 / 44100)

        for i in range(nBase):
            nowFig = self.nFig + (i / nSubRows) + 1
            # Because subplot indices start at 1, the graph index starts from i + 1.
            plt.subplot(nSubRows, nSubCols, i + 1)
            plt.tick_params(labelleft='off', labelbottom='off')

            # FIXME
            #plt.ylabel(self.st5[i%12] + str(i/12  + 1))
            plt.ylabel(str(i))
            plt.plot(data[:,i])
        # Because the label should appear at the bottom, xlabel is set after the loop.
        plt.tick_params(labelleft='off', labelbottom='on')
        plt.xlabel('frequency [Hz]')

        #self.nFig += nowFig
        self.nFig += 1 
Example #21
def plotSurface(pt, td, winds, map, stride, title, file_name):
    pylab.figure()
    pylab.axes((0.05, 0.025, 0.9, 0.9))

    u, v = winds

    nx, ny = pt.shape
    gs_x, gs_y = goshen_3km_gs
    xs, ys = np.meshgrid(gs_x * np.arange(nx), gs_y * np.arange(ny))   

    data_thin = tuple([ slice(None, None, stride) ] * 2)

    td_cmap = matplotlib.cm.get_cmap('Greens')
    td_cmap.set_under('#ffffff')
    pylab.contourf(xs, ys, td, levels=np.arange(40, 80, 5), cmap=td_cmap)
    pylab.colorbar()
    CS = pylab.contour(xs, ys, pt, colors='r', linestyles='-', linewidths=1.5, levels=np.arange(288, 324, 4))
    pylab.clabel(CS, inline_spacing=0, fmt="%d K", fontsize='x-small')
    pylab.quiver(xs[data_thin], ys[data_thin], u[data_thin], v[data_thin])

    drawPolitical(map, scale_len=75)

    pylab.suptitle(title)
    pylab.savefig(file_name)
    pylab.close()
    return
Example #22
 def _barplot_engine(self, suptitle, numIter=None, saveas=None, show=True):
     '''
     code to support a barchart visualization of the results, with one
     bar chart per iteration.
     '''
     res = self.annotation_dat if numIter is None else self.annotation_dat[0:numIter]
     
     pl.figure(num=1)
         
     #AS is complete Annotation Set, max_count is the maximum number
     # of genes that appears in any single annotation entry
     (AS, max_count) = self._common_Y()
     
     for (i, dat, genelist) in res:
         if len(dat) < 1: continue
         pl.subplot(2, math.ceil( len(res)/2.0), i+1)
         title = "Iteration %d (%d Genes)"%((i+1), len(genelist))
         flag1=False if i in [0,5] else True
         
         self._plot_GO_bar_chart(i, title, annotation_set=AS, xlim=[0,max_count],
                               suppress_ylab=flag1, suppress_desc=False, desc_filter=True,
                               color="yellow", show=False)  
     pl.subplots_adjust(left=0.1, right=0.95, top=0.90, bottom=0.05, wspace=0.1, hspace=0.2)
     if not suptitle is None:
         pl.suptitle("Ontology Annotations per Iteration: %s"%suptitle)
     if not saveas is None:
         pl.savefig(saveas)   
     if show: pl.show()
Example #23
def compare_expvaluecollections(coll1, coll2, show=True, **kwargs):
    """
    Plot all subsystems of two ExpectationValueCollections.

    *Arguments*
        * *coll1*
            First :class:`pycppqed.expvalues.ExpectationValue.Collection`.

        * *coll2*
            Second :class:`pycppqed.expvalues.ExpectationValue.Collection`.

        * *show* (optional):
            If True, pylab.show() is called at the end. This means a plotting
            window will pop up automatically. (Default is True)

        * Any other arguments that the pylab plotting command can use.

    This function allows a fast comparison between two sets of expectation
    values that were obtained by different calculations.
    """
    import pylab
    s1 = coll1.subsystems
    s2 = coll2.subsystems
    assert len(s1) == len(s2)
    for i in range(len(s1)):
        pylab.figure()
        _compare_expvaluesubsystems(s1.values()[i], s2.values()[i],
                                        show=False, **kwargs)
        title = "%s vs. %s" % (s1.keys()[i], s2.keys()[i])
        if hasattr(pylab, "suptitle"): # For old versions not available.
            pylab.suptitle(title)
            pylab.gcf().canvas.set_window_title(title)
    if show:
        pylab.show()
Example #24
def ks_gof(mcmc, samples, format="png"):
    """Runs ks_2samp test and plots Observed/Expected vs. Simulated/Expected"""
    size = len(samples)
    #Check for bedrock data
    for sample in samples:
        if isinstance(sample, BedrockSample):
            size = len(samples)-1
    #Set size and create grid
    if size == 1:
        fig = plt.figure(figsize=(10, 10))
    else:
        fig = plt.figure(figsize=(6, 10))
    grid = ag.axes_grid.Grid(fig, 111, nrows_ncols = (size, 1), axes_pad = 1, share_x=False, share_y=False, label_mode = "all" )
    j = 0 
    for sample in samples:
        if isinstance(sample, DetritalSample):
            #obs = mcmc.get_node("ObsAge_" + sample.name)
            sim = mcmc.get_node("SimAge_" + sample.name)
            exp = mcmc.get_node("ExpAge_" + sample.name)
            d_simExp=[]
            d_obsExp=[]
            #import ipdb; ipdb.set_trace()
            for i in range(len(exp.trace()[:,-1])):
                D, P = sp.stats.ks_2samp(mcmc.trace(exp)[i,-1], mcmc.trace(sim)[i,-1])
                d_simExp.append(D)
                D, P = sp.stats.ks_2samp(mcmc.trace(exp)[i,-1], sample.ages)
                d_obsExp.append(D)
                
            #The test statistics generated from ks_2samp plot as a grid. The following adds
            #random uniform noise for a better visual representation on the plot.
            noise=(0.5/len(sample.ages))
            for i in range(len(d_simExp)):
                d_simExp[i] = d_simExp[i] + np.random.uniform(-noise, noise, 1)
                d_obsExp[i] = d_obsExp[i] + np.random.uniform(-noise, noise, 1)

            #Calculate p-value (The proportion of test statistics above the y=x line)
            count=0
            for i in range(len(d_simExp)):
                if (d_simExp[i]>d_obsExp[i]):
                    count=count+1
            count=float(count)
            p_value=(count/len(d_simExp))
    
            #Plot
            grid[j].scatter(d_obsExp, d_simExp, color='gray', edgecolors='black')
            grid[j].set_xlabel('Observed/Expected', fontsize='small')
            grid[j].set_ylabel('Simulated/Expected', fontsize='small')
            label = sample.name + ", " + "p-value="+"%f"%p_value
            grid[j].set_title(label, fontsize='medium')

            #Add a y=x line to plot
            if (max(d_simExp)>=max(d_obsExp)):
                grid[j].plot([0,max(d_simExp)],[0,max(d_simExp)], color='black')
            else:
                grid[j].plot([0,max(d_obsExp)],[0,max(d_obsExp)], color='black')
            j+=1
            
            
    plt.suptitle('Kolmogorov-Smirnov Statistics', fontsize='large')
    fig.savefig("KS_test."+format)
Example #25
def expvaluecollection(evc, show=True, **kwargs):
    """
    Visualize a :class:`pycppqed.expvalues.ExpectationValueCollection`.

    *Usage*
        >>> import numpy as np
        >>> T = np.linspace(0,10)
        >>> d = (np.sin(T), np.cos(T))
        >>> titles = ("<x>", "<y>")
        >>> evc = pycppqed.expvalues.ExpectationValueCollection(d, T, titles)
        >>> evc.plot()

    *Arguments*
        * *evc*
            A :class:`pycppqed.expvalues.ExpectationValueCollection`.

        * *show* (optional):
            If True, pylab.show() is called at the end. This means a plotting
            window will pop up automatically. (Default is True)

        * Any other arguments that the pylab plotting command can use.
    """
    if evc.subsystems:
        import pylab
        for sysname, data in evc.subsystems.iteritems():
            pylab.figure()
            _expvalues(data.evtrajectories, show=False, **kwargs)
            if hasattr(pylab, "suptitle"): # For old versions not available.
                pylab.suptitle(sysname)
                pylab.gcf().canvas.set_window_title(sysname)
        if show:
            pylab.show()
    else:
        titles = ["(%s) %s" % (i, title) for i, title in enumerate(evc.titles)]
        _expvalues(evc.evtrajectories, titles, show=show, **kwargs)
Example #26
    def createCoefGraph(data, nFig, lim, ymin):
        plt.figure(nFig)
        plt.suptitle('Coef')
        nBase = data.shape[0]
        nSubCols = nBase / 10
        if nSubCols > 0:
            nSubRows = nBase / nSubCols
        else:
            nSubRows = nBase 
            nSubCols = 1
        # print data.shape

        # The formula must be changed according to the sampling frequency and the frame shift width
        timeLine = [i * 1024 / 8000.0 for i in range(data.shape[1])]
        # print len(timeLine)
        for i in range(nBase):
            plt.subplot(nSubRows, nSubCols, i + 1)
            plt.tick_params(labelleft='off', labelbottom='off')
            # FIXME: Arguments of X
            # plt.plot(timeLine, data[i,:])
            if lim:
                plt.ylim(ymin=ymin)
            plt.plot(timeLine, data[i,:])
        # Because the label should appear at the bottom, xlabel is set after the loop.
        plt.tick_params(labelleft='off', labelbottom="on")
        plt.xlabel('time [ms]')
Example #27
	def NeutralLinguagram(self, M, savename, start=1):
		fakeX = []
		for i in range(len(M)):
			xs = []
			for j in range(1,33):
				xs.append(j)
			fakeX.append(xs)
		
		x1 = array(fakeX)
		y1 = array(M)
		Z = []
		for i in range(start, (len(M)+start)):
			zs = []
			for j in range(32):
				zs.append(i)
			Z.append(zs)
		z1 = array(Z)
		
		fig = p.figure()
		ax = Axes3D(fig)
		ax.plot_surface(z1, -x1, y1, rstride=1, cstride=1, cmap=cm.jet)
		ax.view_init(90,-90)
		p.suptitle(savename[:-4])
		#p.show()
		
		p.savefig(savename, format = 'png')
Example #28
File: ip.py Project: jahuth/ni
 def plot_prototypes(self):
         """ plots each of the components as a prototype (sum of fitted b-splines) and returns a dictionary of figures """
         figs = {}
         for h in np.unique(self.design.header):
                 fig = pl.figure()
                 splines = self.design.get(h)
                 if 'rate' in h:
                         pl.plot(np.sum(self.design.get(h)[:self.design.trial_length] * self.beta[self.design.getIndex(h)],1))
                         pl.title(h)
                 elif len(splines.shape) == 1 or (splines.shape[0] == 1):
                         pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1),'o')
                         pl.title(h)
                 elif len(splines.shape) == 2:
                         pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1))
                         pl.title(h)
                 elif len(splines.shape) == 3:
                         slices = np.zeros(splines.shape)
                         for (i, ind) in zip(range(splines.shape[0]),self.design.getIndex(h)):
                                 slices[i,:,:] = splines[i,:,:] * self.beta[ind]
                         pl.imshow(slices.sum(axis=0),cmap='jet')
                         figs[h + '_sum'] = fig
                         fig = pl.figure()
                         for i in range(len(slices)):
                                 pl.subplot(np.ceil(np.sqrt(slices.shape[0])),np.ceil(np.sqrt(slices.shape[0])),i+1)
                                 pl.imshow(slices[i],vmin=np.percentile(slices,1),vmax=np.percentile(slices,99),cmap='jet')
                         pl.suptitle(h)
                         figs[h] = fig
                 else:
                         pl.plot(np.sum(self.design.get(h) * self.beta[self.design.getIndex(h)],1))
                         pl.title(h)
                 figs[h] = fig
         return figs
Example #29
def plot_multiplot_histogram(data):
    xcol=4
    ycol=6
    for index,attrib in enumerate(attribList):
        byAttrib=get_byAttrib(data,attrib)
        P.suptitle('Attribute Histograms')
        ax = P.subplot(xcol,ycol,index+1)
        plot_histogram(byAttrib,attrib=attribDict[attrib],bool_labels=False)
        for item in ax.get_xticklabels():
            item.set_fontsize(0)
        for item in ax.get_yticklabels():
            item.set_fontsize(0)
        if index % ycol == 0:
            P.ylabel('Probability')
            for item in ax.get_yticklabels():
                item.set_fontsize(8)
        if index > (xcol-1)*ycol-1:
            P.xlabel('Rank')
            for item in ax.get_xticklabels():
                item.set_fontsize(8)
        P.xlim([1,24])
        P.ylim([0,0.25])
        ax.yaxis.label.set_size(10)
        ax.xaxis.label.set_size(10)
        if np.sum(byAttrib[0:7,1])>2*np.sum(byAttrib[16:23,1]):
            P.text(20,0.20,'+')
        elif np.sum(byAttrib[16:23,1])>2*np.sum(byAttrib[0:7,1]):
            P.text(20,0.20,'-')
    P.subplots_adjust(hspace=.50)
    P.subplots_adjust(wspace=.50)
    P.subplots_adjust(bottom=0.1)
Example #30
def run_demo(with_plots=True):
    """
    Demonstrates how to use PyFMI for simulation of 
    Co-Simulation FMUs (version 1.0).
    """
    fmu_name = O.path.join(path_to_fmus,'bouncingBall.fmu')
    model = load_fmu(fmu_name)
    
    res = model.simulate(final_time=2.)
    
    # Retrieve the result for the variables
    h_res = res['h']
    v_res = res['v']
    t     = res['time']

    assert N.abs(res.final('h') - (0.0424044)) < 1e-2
    
    # Plot the solution
    if with_plots:
        # Plot the height
        fig = P.figure()
        P.clf()
        P.subplot(2,1,1)
        P.plot(t, h_res)
        P.ylabel('Height (m)')
        P.xlabel('Time (s)')
        # Plot the velocity
        P.subplot(2,1,2)
        P.plot(t, v_res)
        P.ylabel('Velocity (m/s)')
        P.xlabel('Time (s)')
        P.suptitle('FMI Bouncing Ball')
        P.show()
Example #31
def forced_phot():
    # Forced phot
    ps = PlotSequence('kick')
    plt.subplots_adjust(top=0.95, bottom=0.1, left=0.1, right=0.95)

    ff = [
        ('decam-348227-S15-g-forced.fits', 348227, 'S15', 'g'),
        ('decam-348248-S16-g-forced.fits', 348248, 'S16', 'g'),
        ('decam-348271-S14-g-forced.fits', 348271, 'S14', 'g'),
        ('decam-348660-S15-r-forced.fits', 348660, 'S15', 'r'),
        ('decam-348684-S16-r-forced.fits', 348684, 'S16', 'r'),
        ('decam-348712-S14-r-forced.fits', 348712, 'S14', 'r'),
        ('decam-349154-S15-z-forced.fits', 349154, 'S15', 'z'),
        ('decam-349183-S14-z-forced.fits', 349183, 'S14', 'z'),
        ('decam-346630-S16-z-forced.fits', 346630, 'S16', 'z'),
    ]
    FF = []
    for fn, expnum, extname, band in ff:
        F = fits_table(fn)
        print len(F), 'from', fn
        F.expnum = np.array([expnum] * len(F))
        F.extname = np.array([extname] * len(F))
        FF.append(F)
    F = merge_tables(FF)
    bricks = np.unique(F.brickname)
    print 'Bricks:', bricks
    T = merge_tables([
        fits_table(os.path.join('dr1', 'tractor', b[:3],
                                'tractor-%s.fits' % b)) for b in bricks
    ])
    print 'Total of', len(T), 'sources'
    T.cut(T.brick_primary == 1)
    print 'Cut to', len(T), 'brick_primary'

    Tall = T.copy()

    cutouts = []

    I, J, d = match_radec(Tall.ra,
                          Tall.dec,
                          Tall.ra,
                          Tall.dec,
                          5. / 3600.,
                          notself=True)
    K = np.flatnonzero(I < J)
    I = I[K]
    J = J[K]
    # randomize
    K = np.random.randint(2, size=len(I))
    I, J = I * K + J * (1 - K), I * (1 - K) + J * K

    plt.clf()
    plothist(
        3600. * (Tall.ra[I] - Tall.ra[J]) * np.cos(np.deg2rad(Tall.dec[I])),
        3600. * (Tall.dec[I] - Tall.dec[J]), 200)  #, range=((-2,2),(-2,2)))
    plt.xlabel('dRA (arcsec)')
    plt.ylabel('dDec (arcsec)')
    plt.title('%i sources, %i matches' % (len(Tall), len(I)))
    ps.savefig()

    dist = 3600. * np.hypot(
        (Tall.ra[I] - Tall.ra[J]) * np.cos(np.deg2rad(Tall.dec[I])),
        Tall.dec[I] - Tall.dec[J])

    cutouts.append((Tall, I[dist < 0.5], 'Close pairs', None))

    plt.clf()
    plt.hist(dist, 50, histtype='step', color='b')
    plt.xlabel('Match distance (arcsec)')
    plt.title('%i sources, %i matches' % (len(Tall), len(I)))
    ps.savefig()

    plt.clf()
    plothist(Tall.bx0[I] - Tall.bx0[J],
             Tall.by0[I] - Tall.by0[J],
             200,
             range=((-20, 20), (-20, 20)))
    plt.xlabel('d(x0)')
    plt.ylabel('d(y0)')
    plt.title('%i sources, %i matches' % (len(Tall), len(I)))
    ps.savefig()

    # plt.clf()
    # plt.plot(np.vstack((Tall.bx0[I], Tall.bx0[J])),
    #          np.vstack((Tall.by0[I], Tall.by0[J])), 'b-')
    # ps.savefig()

    # Pack (brickid, objid) into a single 64-bit source identifier for cross-matching.
    T.srcid = (T.brickid.astype(np.int64) << 32 | T.objid)

    F.srcid = (F.brickid.astype(np.int64) << 32 | F.objid)
    print 'Total of', len(F), 'forced'
    print len(np.unique(F.srcid)), 'unique source in forced'

    tmap = dict([(s, i) for i, s in enumerate(T.srcid)])

    T.forced_g = [[] for i in range(len(T))]
    T.forced_r = [[] for i in range(len(T))]
    T.forced_z = [[] for i in range(len(T))]

    for f in F:
        i = tmap[f.srcid]
        forced = T.get('forced_%s' % f.filter)[i]
        forced.append(f.flux)

    allbands = 'ugrizY'

    # pack into arrays
    bands = 'grz'
    for band in bands:
        flist = T.get('forced_%s' % band)
        nmax = max([len(f) for f in flist])
        arr = np.zeros((len(T), nmax), np.float32)
        for i, f in enumerate(flist):
            arr[i, :len(f)] = f
        T.set('forced_%s' % band, arr)

        ib = allbands.index(band)
        flux = T.decam_flux[:, ib]
        arr = np.zeros(len(T), np.float32)
        for i, f in enumerate(flist):
            arr[i] = np.mean([(fi - flux[i])**2 for fi in f])
        arr = np.sqrt(arr)
        T.set('forced_rms_%s' % band, arr)

        T.set('forced_n_%s' % band, np.array([len(f) for f in flist]))

    # Cut to sources with forced phot
    T.cut(
        np.flatnonzero(
            reduce(np.logical_or, [
                np.any(T.get('forced_%s' % band) > 0, axis=1) for band in bands
            ])))
    print 'Cut to', len(T), 'sources with forced phot'

    flux = T.decam_flux[:, 1]
    forced = T.forced_g
    rms = T.forced_rms_g
    N = T.forced_n_g
    I = np.flatnonzero((N > 1) * (flux > 1e3))
    print len(I), 'with flux > 10^3'
    #print 'flux', flux[I]
    #print 'forced'
    for f, ff, r, n in zip(flux[I], forced[I, :], rms[I], N[I]):
        print 'flux', f, 'n', n, 'forced RMS', r, 'forced', ff

    # Compute some summary stats
    # allbands = 'ugrizY'
    # for b in bands:
    #     ib = allbands.index(b)
    #     forced = T.get('forced_%s' % b)
    #     flux = T.decam_flux[:,ib]
    #     T.set('forced_rms_%s' % b, np.sqrt([np.mean([(fi - fl)**2 for fi in flist if fi != 0])
    #                                         for flist,fl in zip(forced, flux)]))

    imgs = {}

    allbands = 'ugrizY'
    for b in bands:
        ib = allbands.index(b)
        plt.clf()
        f = T.get('forced_%s' % b)
        n, nf = f.shape
        flux = T.decam_flux[:, ib]
        lo, hi = 1e-2, 1e5
        plt.plot([lo, hi], [lo, hi], 'r-')
        plt.errorbar(flux,
                     flux,
                     fmt='none',
                     yerr=1. / np.sqrt(T.decam_flux_ivar[:, ib]),
                     ecolor='r')
        for i in range(nf):
            plt.plot(T.decam_flux[:, ib], f[:, i], 'b.', alpha=0.25)
        plt.axis([lo, hi, lo, hi])
        plt.xscale('log')
        plt.yscale('log')
        plt.xlabel('Combined flux')
        plt.ylabel('Forced-photometry flux')
        plt.title('Forced phot: %s band' % b)
        ps.savefig()

    for b in bands:
        ib = allbands.index(b)
        plt.clf()
        f = T.get('forced_%s' % b)
        n, nf = f.shape
        flux = T.decam_flux[:, ib]
        lo, hi = -0.5, 4
        plt.axhline(1., color='r')
        plt.axhline(0., color='k', alpha=0.25)
        for i in range(nf):
            plt.plot(flux, np.clip(f[:, i] / flux, lo, hi), 'b.', alpha=0.25)
        plt.ylim(lo - 0.1, hi + 0.1)
        plt.xlim(1e-2, 1e5)
        plt.xscale('log')
        #plt.yscale('log')
        plt.xlabel('Combined flux')
        plt.ylabel('Relative forced-photometry flux')
        plt.title('Forced phot: %s band' % b)
        ps.savefig()

        # Large relative flux
        I = np.flatnonzero((flux > 0.) * ((f[:, i] / flux) > 4.))
        #print 'Relative fluxes:', (f[:,i]/flux)[I]
        #print 'Fluxes:', flux[I]
        I = I[np.argsort(-flux[I])]
        #print 'Sorted fluxes:', flux[I]
        labels = []
        for t in T[I]:
            fluxes = t.get('forced_%s' % b)
            ib = allbands.index(b)
            flux = t.decam_flux[ib]
            txt = ('%.1f / %.1f / %.1f | %.1f' %
                   (fluxes[0], fluxes[1], fluxes[2], flux))
            labels.append(txt)

        cutouts.append((T, I, 'Large relative flux: %s band' % b, labels))

    for b in bands:
        ib = allbands.index(b)
        plt.clf()
        rms = T.get('forced_rms_%s' % b)
        N = T.get('forced_n_%s' % b)
        flux = T.decam_flux[:, ib]
        lo, hi = -0.5, 4
        #plt.axhline(1., color='r')
        #plt.axhline(0., color='k', alpha=0.25)
        I = np.flatnonzero(N > 1)
        print len(I), 'of', len(rms), 'have >1 exposure'
        plt.plot(flux[I], rms[I], 'b.', alpha=0.25)
        #plt.ylim(lo-0.1, hi+0.1)
        plt.xlim(1e-2, 1e5)
        plt.xscale('log')
        plt.yscale('log')
        plt.xlabel('Combined flux')
        plt.ylabel('Forced-photometry flux RMS')
        plt.title('Forced phot: %s band' % b)
        ps.savefig()

        # Large relative RMS
        I = np.flatnonzero((flux > 10.) * (N > 1) * ((rms / flux) > 0.1))
        I = I[np.argsort(-flux[I])]
        cutouts.append((T, I, 'Large relative RMS: %s band' % b, None))

        T[I].writeto('large-rms-%s.fits' % b)

    # Create a fake "brick" WCS bounding the forced-phot objects
    # rlo = T.ra.min()
    # rhi = T.ra.max()
    # dlo = T.dec.min()
    # dhi = T.dec.max()

    rlo = Tall.ra.min()
    rhi = Tall.ra.max()
    dlo = Tall.dec.min()
    dhi = Tall.dec.max()

    if rhi - rlo > 180:
        print 'No RA wrap-around support'
        sys.exit(0)

    dec = (dlo + dhi) / 2.
    ra = (rlo + rhi) / 2.
    pixscale = 0.262
    ddec = (dhi - dlo)
    H = ddec * 3600. / pixscale
    dra = (rhi - rlo)
    W = dra * np.cos(np.deg2rad(dec)) * 3600. / pixscale
    H = int(np.ceil(H))
    W = int(np.ceil(W))
    print 'Target image shape', (H, W)
    targetwcs = Tan(ra, dec, W / 2. + 0.5, H / 2. + 0.5, -pixscale / 3600., 0.,
                    0., pixscale / 3600., float(W), float(H))
    img = np.zeros((H, W, 3), np.uint8)
    print 'img', img.shape
    decals = Decals()
    for brickname in bricks:
        brick = decals.get_brick_by_name(brickname)
        brickwcs = wcs_for_brick(brick)

        try:
            Yo, Xo, Yi, Xi, nil = resample_with_wcs(targetwcs, brickwcs, [], 3)
        except:
            continue
        brickimg = plt.imread(
            os.path.join('dr1', 'coadd', brickname[:3], brickname,
                         'decals-%s-image.jpg' % brickname))
        print 'brick image', brickimg.shape, brickimg.dtype
        brickimg = brickimg[::-1, :, :]
        img[Yo, Xo, :] = brickimg[Yi, Xi, :]

    # Now fake the "bx,by" coords to refer to 'targetwcs' / 'img'
    ok, x, y = targetwcs.radec2pixelxy(T.ra, T.dec)
    T.bx = x - 1
    T.by = y - 1
    ok, x, y = targetwcs.radec2pixelxy(Tall.ra, Tall.dec)
    Tall.bx = x - 1
    Tall.by = y - 1

    print 'Tall:', len(Tall), 'sources'

    # plt.clf()
    # dimshow(img)
    # ps.savefig()
    #
    # ax = plt.axis()
    # plt.plot(Tall.bx, Tall.by, 'r.')
    # plt.axis(ax)
    # ps.savefig()

    for TT, I, desc, labels in cutouts:
        plt.subplots_adjust(left=0.05,
                            right=0.95,
                            bottom=0.05,
                            top=0.95,
                            hspace=0.05,
                            wspace=0.05)

        kwa = dict(rows=6, cols=9)

        plt.clf()
        plot_objects(TT[I], None, img, targetwcs, **kwa)
        plt.suptitle(desc)
        ps.savefig()

        plt.clf()
        plot_objects(TT[I], Tall, img, targetwcs, labels=labels, **kwa)
        plt.suptitle(desc)
        ps.savefig()
Example #32
import pylab as pl
from time import time  # used for the timing call at the end of this excerpt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter

from sklearn import manifold, datasets

# Next line to silence pyflakes. This import is needed.
Axes3D

n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2

fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
            % (1000, n_neighbors), fontsize=14)

try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(241, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
    ax.view_init(4, -72)
except:
    ax = fig.add_subplot(241, projection='3d')
    pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)

methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

for i, method in enumerate(methods):
    t0 = time()
Example #33
def _peak_plot_1(vetomap, x, y, px, py, keep, i, xomit, yomit, sedsn, allblobs,
                 level, dilate, saturated_pix, satur, ps, rgbimg, cut):
    from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
    from scipy.ndimage.measurements import label
    import pylab as plt
    plt.clf()
    plt.suptitle('Peak at %i,%i (%s)' % (x, y, ('cut' if cut else 'kept')))
    newsym = '+'
    if cut:
        newsym = 'x'
    plt.subplot(2, 3, 1)
    plt.imshow(vetomap,
               interpolation='nearest',
               origin='lower',
               cmap='gray',
               vmin=0,
               vmax=1)
    ax = plt.axis()
    prevx = px[:i][keep[:i]]
    prevy = py[:i][keep[:i]]
    plt.plot(prevx, prevy, 'o', mec='r', mfc='none')
    plt.plot(xomit, yomit, 'm.')
    plt.plot(x, y, newsym, mec='r', mfc='r')
    plt.axis(ax)
    plt.title('veto map')

    ablob = allblobs[y, x]
    saddlemap = (sedsn > level)
    saddlemap = binary_dilation(saddlemap, iterations=dilate)
    if saturated_pix is not None:
        saddlemap |= satur
    saddlemap *= (allblobs == ablob)
    # plt.subplot(2,3,2)
    # plt.imshow(saddlemap, interpolation='nearest', origin='lower',
    #            vmin=0, vmax=1, cmap='gray')
    # ax = plt.axis()
    # plt.plot(x, y, newsym, mec='r', mfc='r')
    # plt.plot(prevx, prevy, 'o', mec='r', mfc='none')
    # plt.plot(xomit, yomit, 'm.')
    # plt.axis(ax)
    # plt.title('saddle map (1)')

    plt.subplot(2, 3, 2)
    saddlemap = binary_fill_holes(saddlemap)
    plt.imshow(saddlemap,
               interpolation='nearest',
               origin='lower',
               vmin=0,
               vmax=1,
               cmap='gray')
    ax = plt.axis()
    plt.plot(x, y, newsym, mec='r', mfc='r')
    plt.plot(prevx, prevy, 'o', mec='r', mfc='none')
    plt.plot(xomit, yomit, 'm.')
    plt.axis(ax)
    plt.title('saddle map (fill holes)')

    blobs, _ = label(saddlemap)
    thisblob = blobs[y, x]
    saddlemap *= (blobs == thisblob)

    plt.subplot(2, 3, 4)
    plt.imshow(saddlemap,
               interpolation='nearest',
               origin='lower',
               vmin=0,
               vmax=1,
               cmap='gray')
    ax = plt.axis()
    plt.plot(x, y, newsym, mec='r', mfc='r')
    plt.plot(prevx, prevy, 'o', mec='r', mfc='none')
    plt.plot(xomit, yomit, 'm.')
    plt.axis(ax)
    plt.title('saddle map (this blob)')

    nzy, nzx = np.nonzero(saddlemap)

    plt.subplot(2, 3, 5)
    plt.imshow(saddlemap,
               interpolation='nearest',
               origin='lower',
               vmin=0,
               vmax=1,
               cmap='gray')
    plt.plot(x, y, newsym, mec='r', mfc='r', label='New source')
    plt.plot(prevx, prevy, 'o', mec='r', mfc='none', label='Previous sources')
    plt.plot(xomit, yomit, 'm.', label='Omit sources')
    plt.axis([min(nzx) - 1, max(nzx) + 1, min(nzy) - 1, max(nzy) + 1])
    plt.legend()
    plt.title('saddle map (this blob)')

    if rgbimg is not None:
        for sp in [3, 6]:
            plt.subplot(2, 3, sp)
            plt.imshow(rgbimg, interpolation='nearest', origin='lower')
            ax = plt.axis()
            plt.plot(x, y, newsym, mec='r', mfc='r')
            plt.plot(prevx, prevy, 'o', mec='r', mfc='none')
            plt.plot(xomit, yomit, 'm.')
            if sp == 3:
                plt.axis(ax)
            else:
                plt.axis(
                    [min(nzx) - 1,
                     max(nzx) + 1,
                     min(nzy) - 1,
                     max(nzy) + 1])

    ps.savefig()
Example #34
def sed_matched_detection(sedname,
                          sed,
                          detmaps,
                          detivs,
                          bands,
                          xomit,
                          yomit,
                          romit,
                          blob_dilate=None,
                          nsigma=5.,
                          saddle_fraction=0.1,
                          saddle_min=2.,
                          saturated_pix=None,
                          veto_map=None,
                          cutonaper=True,
                          ps=None,
                          rgbimg=None):
    '''
    Runs a single SED-matched detection filter.

    Avoids creating sources close to existing sources.

    Parameters
    ----------
    sedname : string
        Name of this SED; only used for plots.
    sed : list of floats
        The SED -- a list of floats, one per band, of this SED.
    detmaps : list of numpy arrays
        The per-band detection maps.  These must all be the same size, the
        brick image size.
    detivs : list of numpy arrays
        The inverse-variance maps associated with `detmaps`.
    bands : list of strings
        The band names of the `detmaps` and `detivs` images.
    xomit, yomit, romit : iterables (lists or numpy arrays) of int
        Previously known sources that are to be avoided; x,y +- radius
    nsigma : float, optional
        Detection threshold.
    saddle_fraction : float, optional
        Fraction of the peak height for selecting new sources.
    saddle_min : float, optional
        Saddle-point depth from existing sources down to new sources.
    saturated_pix : None or list of numpy arrays, boolean
        A map of pixels that are always considered "hot" when
        determining whether a new source touches hot pixels of an
        existing source.
    cutonaper : bool, optional
        Apply a cut that the source's detection strength must be greater
        than `nsigma` above the 16th percentile of the detection strength in
        an annulus (from 10 to 20 pixels) around the source.
    ps : PlotSequence object, optional
        Create plots?

    Returns
    -------
    hotblobs : numpy array of bool
        A map of the blobs yielding sources in this SED.
    px, py : numpy array of int
        The new sources found.
    aper : numpy array of float
        The detection strength in the annulus around the source, if
        `cutonaper` is set; else -1.
    peakval : numpy array of float
        The detection strength.

    See also
    --------
    sed_matched_filters : creates the `(sedname, sed)` pairs used here
    run_sed_matched_filters : calls this method
    '''
    from scipy.ndimage.measurements import label, find_objects
    from scipy.ndimage.morphology import binary_dilation, binary_fill_holes

    H, W = detmaps[0].shape
    allzero = True
    for iband in range(len(bands)):
        if sed[iband] == 0:
            continue
        if np.all(detivs[iband] == 0):
            continue
        allzero = False
        break
    if allzero:
        info('SED', sedname, 'has all zero weight')
        return None, None, None, None, None

    sedmap = np.zeros((H, W), np.float32)
    sediv = np.zeros((H, W), np.float32)
    if saturated_pix is not None:
        satur = np.zeros((H, W), bool)

    for iband in range(len(bands)):
        if sed[iband] == 0:
            continue
        # We convert the detmap to canonical band via
        #   detmap * w
        # And the corresponding change to sig1 is
        #   sig1 * w
        # So the invvar-weighted sum is
        #    (detmap * w) / (sig1**2 * w**2)
        #  = detmap / (sig1**2 * w)
        sedmap += detmaps[iband] * detivs[iband] / sed[iband]
        sediv += detivs[iband] / sed[iband]**2
        if saturated_pix is not None:
            satur |= saturated_pix[iband]
    sedmap /= np.maximum(1e-16, sediv)
    sedsn = sedmap * np.sqrt(sediv)
    del sedmap
    peaks = (sedsn > nsigma)

    def saddle_level(Y):
        # Require a saddle that drops by (the larger of) "saddle"
        # sigma, or 10% of the peak height.
        # ("saddle" is passed in as an argument to the
        #  sed_matched_detection function)
        drop = max(saddle_min, Y * saddle_fraction)
        return Y - drop

    lowest_saddle = nsigma - saddle_min

    # zero out the edges -- larger margin here?
    peaks[0, :] = 0
    peaks[:, 0] = 0
    peaks[-1, :] = 0
    peaks[:, -1] = 0

    # Label the N-sigma blobs at this point... we'll use this to build
    # "sedhot", which in turn is used to define the blobs that we will
    # optimize simultaneously.  This also determines which pixels go
    # into the fitting!
    if blob_dilate is None:
        blob_dilate = 8
    hotblobs, nhot = label(
        binary_fill_holes(binary_dilation(peaks, iterations=blob_dilate)))

    # find pixels that are larger than their 8 neighbors
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[0:-2, 1:-1])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[2:, 1:-1])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[1:-1, 0:-2])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[1:-1, 2:])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[0:-2, 0:-2])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[0:-2, 2:])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[2:, 0:-2])
    peaks[1:-1, 1:-1] &= (sedsn[1:-1, 1:-1] >= sedsn[2:, 2:])

    if ps is not None:
        import pylab as plt
        from astrometry.util.plotutils import dimshow
        plt.clf()
        plt.subplot(1, 2, 2)
        dimshow(sedsn, vmin=-2, vmax=100, cmap='hot', ticks=False)
        plt.subplot(1, 2, 1)
        dimshow(sedsn, vmin=-2, vmax=10, cmap='hot', ticks=False)
        above = (sedsn > nsigma)
        plot_boundary_map(above)
        ax = plt.axis()
        y, x = np.nonzero(peaks)
        plt.plot(xomit, yomit, 'm.')
        plt.plot(x, y, 'r+')
        plt.axis(ax)
        plt.title('SED %s: S/N & peaks' % sedname)
        ps.savefig()

        #import fitsio
        #fitsio.write('sed-sn-%s.fits' % sedname, sedsn)

        # plt.clf()
        # plt.imshow(sedsn, vmin=-2, vmax=10, interpolation='nearest',
        #            origin='lower', cmap='hot')
        # plot_boundary_map(sedsn > lowest_saddle)
        # plt.title('SED %s: S/N & lowest saddle point bounds' % sedname)
        # ps.savefig()

    # For each new source, compute the saddle value, segment at that
    # level, and drop the source if it is in the same blob as a
    # previously-detected source.

    # We dilate the blobs a bit too, to
    # catch slight differences in centroid positions.
    dilate = 1

    # For efficiency, segment at the minimum saddle level to compute
    # slices; the operations described above need only happen within
    # the slice.
    saddlemap = (sedsn > lowest_saddle)
    saddlemap = binary_dilation(saddlemap, iterations=dilate)
    if saturated_pix is not None:
        saddlemap |= satur
    allblobs, _ = label(saddlemap)
    allslices = find_objects(allblobs)
    ally0 = [sy.start for sy, sx in allslices]
    allx0 = [sx.start for sy, sx in allslices]

    # brightest peaks first
    py, px = np.nonzero(peaks)
    I = np.argsort(-sedsn[py, px])
    py = py[I]
    px = px[I]

    keep = np.zeros(len(px), bool)

    peakval = []
    aper = []
    apin = 10
    apout = 20

    # Map of pixels that are vetoed by sources found so far.  The veto
    # area is based on saddle height.  We go from brightest to
    # faintest pixels.  Thus the saddle level decreases, and the
    # saddlemap areas become larger; the saddlemap when a source is
    # found is a lower bound on the pixels that it will veto based on
    # the saddle heights of fainter sources.  Thus the vetomap isn't
    # the final word, it is just a quick veto of pixels we know for
    # sure will be vetoed.
    if veto_map is None:
        this_veto_map = np.zeros(sedsn.shape, bool)
    else:
        this_veto_map = veto_map.copy()

    for x, y, r in zip(xomit, yomit, romit):
        xlo = int(np.clip(np.floor(x - r), 0, W - 1))
        xhi = int(np.clip(np.ceil(x + r), 0, W - 1))
        ylo = int(np.clip(np.floor(y - r), 0, H - 1))
        yhi = int(np.clip(np.ceil(y + r), 0, H - 1))
        this_veto_map[ylo:yhi + 1, xlo:xhi + 1] |= (np.hypot(
            (x - np.arange(xlo, xhi + 1))[np.newaxis, :],
            (y - np.arange(ylo, yhi + 1))[:, np.newaxis]) < r)
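        # The hypot(...) < r comparison rasterizes a filled disc of radius r
        # centred on (x, y), so each omitted source vetoes roughly pi*r**2
        # pixels (clipped at the image edges).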

    if ps is not None:
        plt.clf()
        plt.imshow(this_veto_map,
                   interpolation='nearest',
                   origin='lower',
                   vmin=0,
                   vmax=1,
                   cmap='hot')
        plt.title('Veto map')
        ps.savefig()

        info('Peaks in initial veto map:', np.sum(this_veto_map[py, px]), 'of',
             len(px))

        plt.clf()
        plt.imshow(saddlemap,
                   interpolation='nearest',
                   origin='lower',
                   vmin=0,
                   vmax=1,
                   cmap='hot')
        ax = plt.axis()
        for slc in allslices:
            sy, sx = slc
            by0, by1 = sy.start, sy.stop
            bx0, bx1 = sx.start, sx.stop
            plt.plot([bx0, bx0, bx1, bx1, bx0], [by0, by1, by1, by0, by0],
                     'r-')
        plt.axis(ax)
        plt.title('Saddle map (lowest level): %i blobs' % len(allslices))
        ps.savefig()

    # For each peak, determine whether it is isolated enough --
    # separated by a low enough saddle from other sources.  Need only
    # search within its "allblob", which is defined by the lowest
    # saddle.
    info('Found', len(px), 'potential peaks')
    nveto = 0
    nsaddle = 0
    naper = 0
    for i, (x, y) in enumerate(zip(px, py)):
        if this_veto_map[y, x]:
            nveto += 1
            continue
        level = saddle_level(sedsn[y, x])
        ablob = allblobs[y, x]
        index = int(ablob - 1)
        slc = allslices[index]
        saddlemap = (sedsn[slc] > level)
        saddlemap = binary_dilation(saddlemap, iterations=dilate)
        if saturated_pix is not None:
            saddlemap |= satur[slc]
        saddlemap *= (allblobs[slc] == ablob)
        saddlemap = binary_fill_holes(saddlemap)
        blobs, _ = label(saddlemap)
        x0, y0 = allx0[index], ally0[index]
        thisblob = blobs[y - y0, x - x0]
        saddlemap *= (blobs == thisblob)

        # previously found sources:
        ox = np.append(xomit, px[:i][keep[:i]]) - x0
        oy = np.append(yomit, py[:i][keep[:i]]) - y0
        h, w = blobs.shape
        cut = False
        if len(ox):
            ox = ox.astype(int)
            oy = oy.astype(int)
            cut = any((ox >= 0) * (ox < w) * (oy >= 0) * (oy < h) *
                      (blobs[np.clip(oy, 0, h - 1),
                             np.clip(ox, 0, w - 1)] == thisblob))

        # one plot per peak is a little excessive!
        if ps is not None and i < 10:
            _peak_plot_1(this_veto_map, x, y, px, py, keep, i, xomit, yomit,
                         sedsn, allblobs, level, dilate, saturated_pix, satur,
                         ps, rgbimg, cut)
        if False and cut and ps is not None:
            _peak_plot_2(ox, oy, w, h, blobs, thisblob, sedsn, x0, y0, x, y,
                         level, ps)
        if False and (not cut) and ps is not None:
            _peak_plot_3(sedsn, nsigma, x, y, x0, y0, slc, saddlemap, xomit,
                         yomit, px, py, keep, i, cut, ps)

        if cut:
            # in same blob as previously found source.
            # update vetomap
            this_veto_map[slc] |= saddlemap
            nsaddle += 1
            continue

        # Measure in aperture...
        ap = sedsn[max(0, y - apout):min(H, y + apout + 1),
                   max(0, x - apout):min(W, x + apout + 1)]
        apiv = (sediv[max(0, y - apout):min(H, y + apout + 1),
                      max(0, x - apout):min(W, x + apout + 1)] > 0)
        aph, apw = ap.shape
        apx0, apy0 = max(0, x - apout), max(0, y - apout)
        R2 = ((np.arange(aph) + apy0 - y)[:, np.newaxis]**2 +
              (np.arange(apw) + apx0 - x)[np.newaxis, :]**2)
        ap = ap[apiv * (R2 >= apin**2) * (R2 <= apout**2)]
        if len(ap):
            # 16th percentile ~ -1 sigma point: for Gaussian noise the 16th
            # percentile lies about one sigma below the mean.
            m = np.percentile(ap, 16.)
        else:
            # fake
            m = -1.
        if cutonaper:
            if sedsn[y, x] - m < nsigma:
                naper += 1
                continue

        aper.append(m)
        peakval.append(sedsn[y, x])
        keep[i] = True
        this_veto_map[slc] |= saddlemap

        if False and ps is not None:
            plt.clf()
            plt.subplot(1, 2, 1)
            dimshow(ap,
                    vmin=-2,
                    vmax=10,
                    cmap='hot',
                    extent=[apx0, apx0 + apw, apy0, apy0 + aph])
            plt.subplot(1, 2, 2)
            dimshow(ap * ((R2 >= apin**2) * (R2 <= apout**2)),
                    vmin=-2,
                    vmax=10,
                    cmap='hot',
                    extent=[apx0, apx0 + apw, apy0, apy0 + aph])
            plt.suptitle('peak %.1f vs ap %.1f' % (sedsn[y, x], m))
            ps.savefig()

    info('Of', len(px), 'potential peaks:', nveto, 'in veto map,',
         nsaddle, 'cut by saddle test,', naper, 'cut by aper test,',
         np.sum(keep), 'kept')

    if ps is not None:
        pxdrop = px[np.logical_not(keep)]
        pydrop = py[np.logical_not(keep)]
    py = py[keep]
    px = px[keep]

    # Which of the hotblobs yielded sources?  Those are the ones to keep.
    hbmap = np.zeros(nhot + 1, bool)
    hbmap[hotblobs[py, px]] = True
    if len(xomit):
        h, w = hotblobs.shape
        hbmap[hotblobs[np.clip(yomit, 0, h - 1),
                       np.clip(xomit, 0, w - 1)]] = True
    # in case a source is (somehow) not in a hotblob?
    hbmap[0] = False
    hotblobs = hbmap[hotblobs]

    if ps is not None:
        plt.clf()
        dimshow(this_veto_map, vmin=0, vmax=1, cmap='hot')
        plt.title('SED %s: veto map' % sedname)
        ps.savefig()

        plt.clf()
        dimshow(hotblobs, vmin=0, vmax=1, cmap='hot')
        ax = plt.axis()
        p2 = plt.plot(pxdrop, pydrop, 'm+', ms=8, mew=2)
        p1 = plt.plot(px, py, 'g+', ms=8, mew=2)
        p3 = plt.plot(xomit, yomit, 'r+', ms=8, mew=2)
        plt.axis(ax)
        plt.title('SED %s: hot blobs' % sedname)
        plt.figlegend((p3[0], p1[0], p2[0]), ('Existing', 'Keep', 'Drop'),
                      'upper left')
        ps.savefig()

    return hotblobs, px, py, aper, peakval
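
A minimal, self-contained sketch of the 8-neighbour local-maximum trick used
above (NumPy only; the array values are made up for illustration):

import numpy as np

sn = np.array([[0., 1., 0., 0.],
               [1., 5., 1., 3.],
               [0., 1., 0., 1.],
               [0., 0., 0., 0.]])

# Start from a threshold map, then keep only interior pixels that are >= each
# of their 8 neighbours, exactly as the shifted-slice comparisons do above.
peaks = sn > 2.
for dy in (-1, 0, 1):
    for dx in (-1, 0, 1):
        if dy == 0 and dx == 0:
            continue
        peaks[1:-1, 1:-1] &= (sn[1:-1, 1:-1] >=
                              sn[1 + dy:sn.shape[0] - 1 + dy,
                                 1 + dx:sn.shape[1] - 1 + dx])
print(np.nonzero(peaks))
# -> (array([1, 1]), array([1, 3])): (1, 1) passes the neighbour test, while
# (1, 3) survives only because border pixels are not tested here (the code
# above zeroes the borders first).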
Ejemplo n.º 35
0
 def multilabels(self, xtext, ytext, title=None):
     plt.subplots_adjust(bottom=0.2)
     plt.figtext(0.5, 0.07, xtext, ha='center')
     plt.figtext(0.05, 0.5, ytext, rotation='vertical', va='center')
     if title is not None: plt.suptitle(title)
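
For context, a minimal usage sketch of this kind of helper: shared x/y labels
placed with figtext on a small subplot grid (plain matplotlib; the data and
labels are made up for illustration):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 50)
fig, axes = plt.subplots(2, 2)
for k, ax in enumerate(axes.ravel()):
    ax.plot(x, x**(k + 1))

# Same idea as multilabels(): one shared x label, one shared (rotated) y
# label, plus an optional figure-level title.
plt.subplots_adjust(bottom=0.2)
plt.figtext(0.5, 0.07, 'shared x label', ha='center')
plt.figtext(0.05, 0.5, 'shared y label', rotation='vertical', va='center')
plt.suptitle('figure title')
plt.show()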
Ejemplo n.º 36
0
    splot.set_xticks(())
    splot.set_yticks(())


def plot_lda_cov(lda, splot):
    plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
    plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')


def plot_qda_cov(qda, splot):
    plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
    plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')

###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # LDA
    lda = LDA()
    y_pred = lda.fit(X, y, store_covariance=True).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    pl.axis('tight')

    # QDA
    qda = QDA()
    y_pred = qda.fit(X, y, store_covariances=True).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    pl.axis('tight')
pl.suptitle('LDA vs QDA')
pl.show()
Ejemplo n.º 37
0
def galaxies():
    ps = PlotSequence('kick')
    plt.subplots_adjust(top=0.95, bottom=0.1, left=0.1, right=0.95)
    brick = '3166p025'

    decals = Decals()
    b = decals.get_brick_by_name(brick)
    brickwcs = wcs_for_brick(b)

    # A catalog of sources overlapping one DECaLS CCD, arbitrarily:
    # python projects/desi/forced-photom-decam.py decam/CP20140810_g_v2/c4d_140816_032035_ooi_g_v2.fits.fz 1 DR1 f.fits
    #T = fits_table('cat.fits')

    T = fits_table(
        os.path.join('dr1', 'tractor', brick[:3], 'tractor-%s.fits' % brick))
    print len(T), 'catalog sources'
    print np.unique(T.brick_primary)
    T.cut(T.brick_primary)
    print len(T), 'primary'

    print 'Out of bounds:', np.unique(T.out_of_bounds)
    print 'Left blob:', np.unique(T.left_blob)

    img = plt.imread(
        os.path.join('dr1', 'coadd', brick[:3], brick,
                     'decals-%s-image.jpg' % brick))
    img = img[::-1, :, :]
    print 'Image:', img.shape

    if False:
        resid = plt.imread(
            os.path.join('dr1', 'coadd', brick[:3], brick,
                         'decals-%s-resid.jpg' % brick))
        resid = resid[::-1, :, :]

    T.shapeexp_e1_err = 1. / np.sqrt(T.shapeexp_e1_ivar)
    T.shapeexp_e2_err = 1. / np.sqrt(T.shapeexp_e2_ivar)
    T.shapeexp_r_err = 1. / np.sqrt(T.shapeexp_r_ivar)
    T.shapedev_e1_err = 1. / np.sqrt(T.shapedev_e1_ivar)
    T.shapedev_e2_err = 1. / np.sqrt(T.shapedev_e2_ivar)
    T.shapedev_r_err = 1. / np.sqrt(T.shapedev_r_ivar)

    T.gflux = T.decam_flux[:, 1]
    T.rflux = T.decam_flux[:, 2]
    T.zflux = T.decam_flux[:, 4]

    I = np.flatnonzero(T.type == 'EXP ')
    J = np.flatnonzero(T.type == 'DEV ')

    E = T[I]
    D = T[J]

    cutobjs = []

    cut = np.logical_or(E.shapeexp_e1_err > 1., E.shapeexp_e2_err > 1.)
    I = np.flatnonzero(cut)
    print len(I), 'EXP with large ellipticity error'
    cutobjs.append((E[I], 'EXP ellipticity error > 1'))

    E.cut(np.logical_not(cut))

    I = np.flatnonzero(
        np.logical_or(D.shapedev_e1_err > 1., D.shapedev_e2_err > 1.))
    print len(I), 'DEV with large ellipticity error'
    cutobjs.append((D[I], 'DEV ellipticity error > 1'))

    I = np.flatnonzero(
        np.logical_or(
            np.abs(E.shapeexp_e1) > 0.5,
            np.abs(E.shapeexp_e2) > 0.5))
    cutobjs.append((E[I], 'EXP with ellipticity > 0.5'))

    I = np.flatnonzero(
        np.logical_or(
            np.abs(D.shapedev_e1) > 0.5,
            np.abs(D.shapedev_e2) > 0.5))
    cutobjs.append((D[I], 'DEV with ellipticity > 0.5'))

    I = np.flatnonzero(
        np.logical_or(E.shapeexp_e1_err < 3e-3, E.shapeexp_e2_err < 3e-3))
    cutobjs.append((E[I], 'EXP with small ellipticity errors (<3e-3)'))

    I = np.flatnonzero(
        np.logical_or(D.shapedev_e1_err < 3e-3, D.shapedev_e2_err < 3e-3))
    cutobjs.append((D[I], 'DEV with small ellipticity errors (<3e-3)'))

    I = np.flatnonzero(D.shapedev_r > 10.)
    cutobjs.append((D[I], 'DEV with large radius (>10")'))

    I = np.flatnonzero(D.shapedev_r_err < 2e-3)
    cutobjs.append((D[I], 'DEV with small radius errors (<2e-3)'))

    I = np.flatnonzero((D.rflux > 100.) * (D.shapedev_r < 5.))
    cutobjs.append((D[I], 'DEV, small & bright'))

    I = np.flatnonzero((E.rflux > 100.) * (E.shapeexp_r < 5.))
    cutobjs.append((E[I], 'EXP, small & bright'))

    # I = np.argsort(-T.decam_flux[:,2])
    # cutobjs.append((T[I], 'brightest objects'))

    I = np.flatnonzero(np.logical_or(D.rflux < -5., D.gflux < -5))
    cutobjs.append((D[I], 'DEV with neg g or r flux'))

    I = np.flatnonzero(np.logical_or(E.rflux < -5., E.gflux < -5))
    cutobjs.append((E[I], 'EXP with neg g or r flux'))

    I = np.flatnonzero(T.decam_rchi2[:, 2] > 5.)
    cutobjs.append((T[I], 'rchi2 > 5'))

    plt.subplots_adjust(left=0.1,
                        right=0.95,
                        bottom=0.1,
                        top=0.95,
                        hspace=0.05,
                        wspace=0.05)

    # plt.clf()
    # p1 = plt.semilogy(T.shapeexp_e1[I], T.shapeexp_e1_ivar[I], 'b.')
    # p2 = plt.semilogy(T.shapeexp_e2[I], T.shapeexp_e2_ivar[I], 'r.')
    # plt.xlabel('Ellipticity e')
    # plt.ylabel('Ellipticity inverse-variance e_ivar')
    # plt.title('EXP galaxies')
    # plt.legend([p1[0],p2[0]], ['e1','e2'])
    # ps.savefig()

    plt.clf()
    p1 = plt.semilogy(E.shapeexp_e1, E.shapeexp_e1_err, 'b.')
    p2 = plt.semilogy(E.shapeexp_e2, E.shapeexp_e2_err, 'r.')
    plt.xlabel('Ellipticity e')
    plt.ylabel('Ellipticity error e_err')
    plt.title('EXP galaxies')
    plt.legend([p1[0], p2[0]], ['e1', 'e2'])
    ps.savefig()

    # plt.clf()
    # p1 = plt.semilogy(T.shapedev_e1[J], T.shapedev_e1_ivar[J], 'b.')
    # p2 = plt.semilogy(T.shapedev_e2[J], T.shapedev_e2_ivar[J], 'r.')
    # plt.xlabel('Ellipticity e')
    # plt.ylabel('Ellipticity inverse-variance e_ivar')
    # plt.title('DEV galaxies')
    # plt.legend([p1[0],p2[0]], ['e1','e2'])
    # ps.savefig()

    plt.clf()
    p1 = plt.semilogy(D.shapedev_e1, D.shapedev_e1_err, 'b.')
    p2 = plt.semilogy(D.shapedev_e2, D.shapedev_e2_err, 'r.')
    plt.xlabel('Ellipticity e')
    plt.ylabel('Ellipticity error e_err')
    plt.title('DEV galaxies')
    plt.legend([p1[0], p2[0]], ['e1', 'e2'])
    ps.savefig()

    plt.clf()
    p1 = plt.loglog(D.shapedev_r, D.shapedev_r_err, 'b.')
    p2 = plt.loglog(E.shapeexp_r, E.shapeexp_r_err, 'r.')
    plt.xlabel('Radius r')
    plt.ylabel('Radius error r_err')
    plt.title('DEV, EXP galaxies')
    plt.legend([p1[0], p2[0]], ['deV', 'exp'])
    ps.savefig()

    plt.clf()
    p1 = plt.loglog(D.rflux, D.shapedev_r, 'b.')
    p2 = plt.loglog(E.rflux, E.shapeexp_r, 'r.')
    plt.xlabel('r-band flux')
    plt.ylabel('Radius r')
    plt.title('DEV, EXP galaxies')
    plt.legend([p1[0], p2[0]], ['deV', 'exp'])
    ps.savefig()

    plt.clf()
    p1 = plt.loglog(-D.rflux, D.shapedev_r, 'b.')
    p2 = plt.loglog(-E.rflux, E.shapeexp_r, 'r.')
    plt.xlabel('Negative r-band flux')
    plt.ylabel('Radius r')
    plt.title('DEV, EXP galaxies')
    plt.legend([p1[0], p2[0]], ['deV', 'exp'])
    ps.savefig()

    plt.clf()
    plt.loglog(D.rflux, D.decam_rchi2[:, 2], 'b.')
    plt.loglog(E.rflux, E.decam_rchi2[:, 2], 'r.')
    plt.xlabel('r-band flux')
    plt.ylabel('rchi2 in r')
    plt.title('DEV, EXP galaxies')
    plt.legend([p1[0], p2[0]], ['deV', 'exp'])
    ps.savefig()

    for objs, desc in cutobjs:
        if len(objs) == 0:
            print 'No objects in cut', desc
            continue

        rows, cols = 4, 6
        objs = objs[:rows * cols]

        if False:
            plt.clf()
            dimshow(img)
            plt.plot(objs.bx, objs.by, 'rx')
            ps.savefig()

        plt.clf()
        plt.subplots_adjust(left=0.05,
                            right=0.95,
                            bottom=0.05,
                            top=0.95,
                            hspace=0.05,
                            wspace=0.05)

        plot_objects(objs, None, img, brickwcs)
        plt.suptitle(desc)
        ps.savefig()

        plt.clf()
        plot_objects(objs, T, img, brickwcs, fracflux=True)
        plt.suptitle(desc)
        ps.savefig()

        if False:
            plt.clf()
            for i, o in enumerate(objs):
                plt.subplot(rows, cols, i + 1)
                H, W, three = img.shape
                dimshow(resid[o.y0:min(H, o.by + S), o.x0:min(W, o.bx + S), :],
                        ticks=False)
            plt.suptitle(desc)
            ps.savefig()

    sys.exit(0)

    print 'RA', T.ra.min(), T.ra.max()
    print 'Dec', T.dec.min(), T.dec.max()

    # Uhh, how does *this* happen?!  Fitting gone wild I guess
    # T.cut((T.ra > 0) * (T.ra < 360) * (T.dec > -90) * (T.dec < 90))
    # print 'RA', T.ra.min(), T.ra.max()
    # print 'Dec', T.dec.min(), T.dec.max()
    # rlo,rhi = [np.percentile(T.ra,  p) for p in [1,99]]
    # dlo,dhi = [np.percentile(T.dec, p) for p in [1,99]]
    # print 'RA', rlo,rhi
    # print 'Dec', dlo,dhi
    # plt.clf()
    # plothist(T.ra, T.dec, 100, range=((rlo,rhi),(dlo,dhi)))
    # plt.xlabel('RA')
    # plt.ylabel('Dec')
    # ps.savefig()

    # decals = Decals()
    # B = decals.get_bricks()
    # #B.about()
    # brick = B[B.brickid == brickid]
    # assert(len(brick) == 1)
    # brick = brick[0]
    # wcs = wcs_for_brick(brick)
    # ccds = decals.get_ccds()
    # ccds.cut(ccds_touching_wcs(wcs, ccds))
    # print len(ccds), 'CCDs'
    # #ccds.about()
    # ccds.cut(ccds.filter == 'r')
    # print len(ccds), 'CCDs'
    # S = []
    # for ccd in ccds:
    #     im = DecamImage(ccd)
    #     S.append(fits_table(im.sdssfn))
    # S = merge_tables(S)
    # print len(S), 'total SDSS'
    # #nil,I = np.unique(S.ra, return_index=True)
    # nil,I = np.unique(['%.5f %.5f' % (r,d) for r,d in zip(S.ra,S.dec)], return_index=True)
    # S.cut(I)
    # print len(S), 'unique'
    #
    # I,J,d = match_radec(T.ra, T.dec, S.ra, S.dec, 1./3600.)
    # print len(I), 'matches'
    #
    # plt.clf()
    # plt.loglog(S.r_psfflux[J], T.decam_r_nanomaggies[I], 'r.')
    # ps.savefig()

    #plt.clf()
    #plt.loglog(T.sdss_modelflux[:,2], T.decam_r_nanomaggies, 'r.')
    #ps.savefig()

    for bindex, band in [(1, 'g'), (2, 'r'), (4, 'z')]:
        sflux = T.sdss_modelflux[:, bindex]
        dflux = T.get('decam_%s_nanomaggies' % band)
        I = np.flatnonzero(sflux > 10.)
        med = np.median(dflux[I] / sflux[I])
        # plt.clf()
        # plt.loglog(sflux, dflux / sflux, 'ro', mec='r', ms=4, alpha=0.1)
        # plt.axhline(med, color='k')
        # plt.ylim(0.5, 2.)
        # ps.savefig()

        corr = dflux / med
        T.set('decam_%s_nanomaggies_corr' % band, corr)
        T.set('decam_%s_mag_corr' % band, NanoMaggies.nanomaggiesToMag(corr))

        dflux = T.get('decam_%s_nanomaggies_corr' % band)
        plt.clf()
        #plt.loglog(sflux, dflux / sflux, 'o', mec='b', ms=4, alpha=0.1)
        plt.loglog(sflux, dflux / sflux, 'b.', alpha=0.01)
        plt.xlim(1e-1, 3e3)
        plt.axhline(1., color='k')
        plt.ylim(0.5, 2.)
        plt.xlabel('SDSS flux (nmgy)')
        plt.ylabel('DECam flux / SDSS flux')
        plt.title('%s band' % band)
        ps.savefig()

    bands = 'grz'

    # for band in bands:
    #     plt.clf()
    #     sn = T.get('decam_%s_nanomaggies' % band) * np.sqrt(T.get('decam_%s_nanomaggies_invvar' % band))
    #     mag = T.get('decam_%s_mag_corr' % band)
    #     plt.semilogy(mag, sn, 'b.')
    #     plt.axis([20, 26, 1, 100])
    #     ps.savefig()

    ccmap = dict(g='g', r='r', z='m')
    plt.clf()
    for band in bands:
        sn = T.get('decam_%s_nanomaggies' % band) * np.sqrt(
            T.get('decam_%s_nanomaggies_invvar' % band))
        mag = T.get('decam_%s_mag_corr' % band)
        cc = ccmap[band]
        #plt.semilogy(mag, sn, '.', color=cc, alpha=0.2)
        plt.semilogy(mag, sn, '.', color=cc, alpha=0.01, mec='none')
    plt.xlabel('mag')
    plt.ylabel('Flux Signal-to-Noise')
    tt = [1, 2, 3, 4, 5, 10, 20, 30, 40, 50]
    plt.yticks(tt, ['%i' % t for t in tt])
    plt.axhline(5., color='k')
    plt.axis([21, 26, 1, 20])
    plt.title('DECaLS depth')
    ps.savefig()

    [gsn, rsn, zsn] = [
        T.get('decam_%s_nanomaggies' % band) *
        np.sqrt(T.get('decam_%s_nanomaggies_invvar' % band)) for band in bands
    ]
    TT = T[(gsn > 5.) * (rsn > 5.) * (zsn > 5.)]

    # plt.clf()
    # plt.plot(g-r, r-z, 'k.', alpha=0.2)
    # plt.xlabel('g - r (mag)')
    # plt.ylabel('r - z (mag)')
    # plt.xlim(-0.5, 2.5)
    # plt.ylim(-0.5, 3)
    # ps.savefig()

    plt.clf()
    lp = []
    cut = (TT.sdss_objc_type == 6)
    g, r, z = [
        NanoMaggies.nanomaggiesToMag(TT.sdss_psfflux[:, i]) for i in [1, 2, 4]
    ]
    p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.3, color='b')
    lp.append(p[0])
    cut = (TT.sdss_objc_type == 3)
    g, r, z = [
        NanoMaggies.nanomaggiesToMag(TT.sdss_modelflux[:, i])
        for i in [1, 2, 4]
    ]
    p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.3, color='r')
    lp.append(p[0])
    plt.xlabel('g - r (mag)')
    plt.ylabel('r - z (mag)')
    plt.xlim(-0.5, 2.5)
    plt.ylim(-0.5, 3)
    plt.legend(lp, ['stars', 'galaxies'])
    plt.title('SDSS')
    ps.savefig()

    g = TT.decam_g_mag_corr
    r = TT.decam_r_mag_corr
    z = TT.decam_z_mag_corr

    plt.clf()
    lt, lp = [], []
    for cut, cc, tt in [(TT.sdss_objc_type == 6, 'b', 'stars'),
                        (TT.sdss_objc_type == 3, 'r', 'galaxies'),
                        (TT.sdss_objc_type == 0, 'g', 'faint')]:
        p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.3, color=cc)
        lt.append(tt)
        lp.append(p[0])
    plt.xlabel('g - r (mag)')
    plt.ylabel('r - z (mag)')
    plt.xlim(-0.5, 2.5)
    plt.ylim(-0.5, 3)
    plt.legend(lp, lt)
    plt.title('DECaLS')
    ps.savefig()

    # Stars/galaxies in subplots

    plt.clf()
    lp = []
    cut = (TT.sdss_objc_type == 6)
    g, r, z = [
        NanoMaggies.nanomaggiesToMag(TT.sdss_psfflux[:, i]) for i in [1, 2, 4]
    ]
    plt.subplot(1, 2, 1)
    p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.02, color='b')
    px = plt.plot(100, 100, '.', color='b')
    lp.append(px[0])
    plt.xlabel('g - r (mag)')
    plt.ylabel('r - z (mag)')
    plt.xlim(-0.5, 2.5)
    plt.ylim(-0.5, 3)
    cut = (TT.sdss_objc_type == 3)
    g, r, z = [
        NanoMaggies.nanomaggiesToMag(TT.sdss_modelflux[:, i])
        for i in [1, 2, 4]
    ]
    plt.subplot(1, 2, 2)
    p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.02, color='r')
    px = plt.plot(100, 100, '.', color='r')
    lp.append(px[0])
    plt.xlabel('g - r (mag)')
    plt.ylabel('r - z (mag)')
    plt.xlim(-0.5, 2.5)
    plt.ylim(-0.5, 3)
    plt.figlegend(lp, ['stars', 'galaxies'], 'upper right')
    plt.suptitle('SDSS')
    ps.savefig()

    g = TT.decam_g_mag_corr
    r = TT.decam_r_mag_corr
    z = TT.decam_z_mag_corr

    plt.clf()
    lt, lp = [], []
    for i, (cut, cc, tt) in enumerate([
        (TT.sdss_objc_type == 6, 'b', 'stars'),
        (TT.sdss_objc_type == 3, 'r', 'galaxies'),
            #(TT.sdss_objc_type == 0, 'g', 'faint'),
    ]):
        plt.subplot(1, 2, i + 1)
        p = plt.plot((g - r)[cut], (r - z)[cut], '.', alpha=0.02, color=cc)
        lt.append(tt)
        px = plt.plot(100, 100, '.', color=cc)
        lp.append(px[0])
        plt.xlabel('g - r (mag)')
        plt.ylabel('r - z (mag)')
        plt.xlim(-0.5, 2.5)
        plt.ylim(-0.5, 3)
    plt.figlegend(lp, lt, 'upper right')
    plt.suptitle('DECaLS')
    ps.savefig()
Ejemplo n.º 38
0
    def plot_window_item(self, m, ind, x, r, k, label, U, scores, rgb_window,
                         where_in_image):

        feature = label.split('[')[0].split('~')[0].split('_')[2]
        unit = label.split('_')[1]
        camera = label[5]

        matplotlib.rc('axes', edgecolor='w')

        fff = pylab.figure()
        pylab.subplots_adjust(wspace=0.05, hspace=0.26)

        # FIRST SUBPLOT: original AOD data
        pylab.subplot(2, 2, 1)
        pylab.plot(self.xvals + 1, r, 'r.-', label='Expected')
        pylab.plot(self.xvals + 1, x, 'b.-', label='Observations')
        pylab.xlim([0.87 * self.xvals.min(), 1.13 * self.xvals.max()])
        pylab.ylim(0, 255)
        pylab.tick_params(\
          axis='both',          # changes apply to the x-axis
          which='both',      # both major and minor ticks are affected
        #   bottom='off',      # ticks along the bottom edge are off
        #   left='off',      # ticks along the left edge are off
          right='off',      # ticks along the right edge are off
          top='off')         # ticks along the top edge are off
        #   labelbottom='off', # labels along the bottom edge are off
        #   labelleft='off')   # labels along the left edge are off?
        pylab.xlabel('Color Band (Wavelength)')
        pylab.ylabel('Average Intensity of Band')
        pylab.legend(prop={'size': 10})

        # Voodoo required to get colorbar to be the right height.
        #from mpl_toolkits.axes_grid1 import make_axes_locatable
        #div = make_axes_locatable(pylab.gca())
        #cax = div.append_axes('right','5%',pad='3%')
        #pylab.colorbar(im, cax=cax)

        # SECOND SUBPLOT: reconstructed data

        matplotlib.rc('axes', edgecolor='w')

        pylab.subplot(2, 2, 2)
        # im = pylab.imshow(255 * np.ones(where_in_image.shape))
        if camera == 'L':
            lbltxt = str(' Band Labels:\n\n' + '1. 440 nm\n' +
                         '2. Bayer Filter Blue [460 nm]\n' + '3. 525 nm\n' +
                         '4. Bayer Filter Green [540 nm]\n' +
                         '5. Bayer Filter Red [620 nm]\n' + '6. 675 nm\n' +
                         '7. 750 nm\n' + '8. 865 nm\n' + '9. 1035 nm\n')
        elif camera == 'R':
            lbltxt = str(' Band Labels:\n\n' + '1. 440 nm\n' +
                         '2. Bayer Filter Blue [460 nm]\n' + '3. 525 nm\n' +
                         '4. Bayer Filter Green [540 nm]\n' +
                         '5. Bayer Filter Red [620 nm]\n' + '6. 800 nm\n' +
                         '7. 905 nm\n' + '8. 935 nm\n' + '9. 1035 nm\n')
        else:
            print "Bad camera: %s" % camera
            lbltxt = "Bad camera."

        pylab.annotate(lbltxt,
                       xy=(.2, .95),
                       xycoords='axes fraction',
                       horizontalalignment='left',
                       verticalalignment='top',
                       size=10)

        pylab.tick_params(\
          axis='both',          # changes apply to the x-axis
          which='both',      # both major and minor ticks are affected
          bottom='off',      # ticks along the bottom edge are off
          left='off',      # ticks along the left edge are off
          right='off',      # ticks along the right edge are off
          top='off',         # ticks along the top edge are off
          labelbottom='off', # labels along the bottom edge are off
          labelleft='off')   # labels along the left edge are off?
        pylab.xlabel('')
        #  div = make_axes_locatable(pylab.gca())
        #  cax = div.append_axes('right','5%',pad='3%')
        #  pylab.colorbar(im, cax=cax)
        # pylab.colorbar(im)

        # THIRD SUBPLOT: residual data

        pylab.subplot(2, 2, 3)

        pylab.imshow(rgb_window, interpolation='nearest')

        pylab.tick_params(\
          axis='both',          # changes apply to the x-axis
          which='both',      # both major and minor ticks are affected
          bottom='off',      # ticks along the bottom edge are off
          left='off',      # ticks along the left edge are off
          right='off',      # ticks along the right edge are off
          top='off',         # ticks along the top edge are off
          labelbottom='off', # labels along the bottom edge are off
          labelleft='off')   # labels along the left edge are off?
        pylab.xlabel('Selected window')

        # Voodoo required to get colorbar to be the right height.
        #    div = make_axes_locatable(pylab.gca())
        #    cax = div.append_axes('right','5%',pad='3%')
        #    cbar = pylab.colorbar(im, cax=cax)
        # cbar = pylab.colorbar(im)
        # tickvals = numpy.arange(-1,1.1,0.5)
        # cbar.set_ticks(tickvals)
        # cbar.set_ticklabels(tickvals)

        # FOURTH SUBPLOT: actual RGB image

        pylab.subplot(2, 2, 4)

        pylab.imshow(where_in_image, interpolation='nearest')

        pylab.tick_params(\
          axis='both',          # changes apply to the x-axis
          which='both',      # both major and minor ticks are affected
          bottom='off',      # ticks along the bottom edge are off
          left='off',      # ticks along the left edge are off
          right='off',      # ticks along the right edge are off
          top='off',         # ticks along the top edge are off
          labelbottom='off', # labels along the bottom edge are off
          labelleft='off')   # labels along the left edge are off?

        pylab.xlabel('Location within original image')

        # Voodoo required to get colorbar to be the right height.
        #    div = make_axes_locatable(pylab.gca())
        #    cax = div.append_axes('right','5%',pad='3%')
        #    cbar = pylab.colorbar(im, cax=cax)

        pylab.suptitle('DEMUD selection %d (%s), item %d, using K=%d' % \
                       (m, label, ind, k))

        outdir = os.path.join('results', self.name)
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        figfile = os.path.join(outdir, 'sel-%d-(%s).pdf' % (m, label))
        plt.savefig(figfile, bbox_inches='tight', pad_inches=0.1)
        print 'Wrote plot to %s' % figfile

        pylab.close(fff)
        pylab.close("all")
Ejemplo n.º 39
0
    # init_sequence = np.zeros((4, ntimesteps))
    # for i in range(1, myiLQR.prediction_horizon):
    #     init_sequence[:,i] = myiLQR.model_f(init_sequence[:,i-1], myiLQR.input_sequence[:,i-1])

    # plt.figure()
    # plt.plot(init_sequence[0,:], init_sequence[1,:], '--',linewidth=1.5, label = 'init state')
    # plt.show()

    start_time = timeit.default_timer()
    myiLQR(show_conv=False)
    elapsed = timeit.default_timer() - start_time

    print("elapsed time: ", elapsed)

    pl.figure(figsize=(8 * 1.1, 6 * 1.1))
    pl.suptitle('iLQR: 2D, x and y.  ')
    pl.axis('equal')
    pl.plot(myiLQR.target_state_sequence[0, :],
            myiLQR.target_state_sequence[1, :],
            '--r',
            label='Target',
            linewidth=2)
    pl.plot(myiLQR.state_sequence[0, :],
            myiLQR.state_sequence[1, :],
            '-+b',
            label='iLQR',
            linewidth=1.0)
    pl.grid('on')
    pl.xlabel('x (meters)')
    pl.ylabel('y (meters)')
    pl.legend(fancybox=True, framealpha=0.2)
Ejemplo n.º 40
0
    g_b[f] = []

    for i in t:
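        # g_b(t) is modelled as a sum of sinusoids:
        #   g_b(t) = sum_k Aa_k * Ab_k * c_k * sin(w_k*t + phia_k + phib_k)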
        gb_i = [Aa_k * Ab_k * c_k * numpy.sin(w_k * i + phia_k + phib_k)
                for Aa_k, Ab_k, c_k, w_k, phia_k, phib_k
                in zip(Aa_karray, Ab_karray, c_karray,
                       w_karray, phia_karray, phib_karray)]
        g_b[f].append(sum(gb_i))



#plot the waveforms in a tidy grid of subplots
n_plots = len(f_array)
cols = 2
rows = n_plots // cols

pylab.figure(1)
pylab.suptitle("Integrator output waveforms simulation", fontsize=14)

for h, f in zip(range(1, n_plots+1, 1), f_array):
    pylab.subplot(rows, cols, h)
    pylab.plot(t*f, g_a[f], label='f = %d Hz' % f) #period set as x-unit
    pylab.legend(prop={'size':10})
    pylab.locator_params(nbins=4)
    pylab.grid()

    #not really elegant, but less boring...
    if (h==3):
        pylab.ylabel('Simulated Signal [arb.un.]', size='large')
    if (h==5) or (h==6):
        pylab.xlabel('Time [T]', size='large')

pylab.figure(2)
Ejemplo n.º 41
0
    def multiplot(self, jd1=730120.0, djd=60, dt=20):

        if not hasattr(self, 'disci'):
            self.generate_regdiscs()
            self.x = self.disci
            self.y = self.discj
        if not hasattr(self, 'lon'):
            self.ijll()

        if USE_FIGPREF: figpref.presentation()
        pl.close(1)
        pl.figure(1, (10, 10))

        conmat = self[jd1 - 730120.0:jd1 - 730120.0 + djd, dt:dt + 10]
        x, y = self.gcm.mp(self.lon, self.lat)
        self.gcm.mp.merid = []
        self.gcm.mp.paral = []

        pl.subplots_adjust(wspace=0, hspace=0, top=0.95)

        pl.subplot(2, 2, 1)
        pl.pcolormesh(miv(conmat), cmap=cm.hot)
        pl.clim(0, 250)
        pl.plot([0, 800], [0, 800], 'g', lw=2)
        pl.gca().set_aspect(1)
        pl.setp(pl.gca(), yticklabels=[])
        pl.setp(pl.gca(), xticklabels=[])
        pl.colorbar(aspect=40,
                    orientation='horizontal',
                    pad=0,
                    shrink=.8,
                    fraction=0.05,
                    ticks=[0, 50, 100, 150, 200])

        pl.subplot(2, 2, 2)
        colorvec = (np.nansum(conmat, axis=1) - np.nansum(conmat, axis=0))[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)

        pl.subplot(2, 2, 3)
        colorvec = np.nansum(conmat, axis=1)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)

        pl.subplot(2, 2, 4)
        colorvec = np.nansum(conmat, axis=0)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)
        if 'mycolor' in sys.modules:
            mycolor.freecbar([0.2, .06, 0.6, 0.020], [2000, 4000, 6000, 8000])
        pl.suptitle("Trajectories seeded from %s to %s, Duration: %i-%i days" %
                    (pl.num2date(jd1).strftime("%Y-%m-%d"),
                     pl.num2date(jd1 + djd).strftime("%Y-%m-%d"), dt, dt + 10))

        pl.savefig('multplot_%i_%03i.png' % (jd1, dt), transparent=True)
Ejemplo n.º 42
0
def run_test(args):

    rho = args.rho
    num_times = args.num_times
    min_num_rows = args.min_num_rows
    max_num_rows = args.max_num_rows
    n_grid = args.n_grid
    filename = args.filename
    discrete = args.discrete

    num_samples = []
    for ns in log_linspace(min_num_rows,max_num_rows,n_grid).tolist():
        num_samples.append(int(ns))

    variances = []

    burn_in = 200

    MIs = numpy.zeros( (num_times, len(num_samples)) )

    mi_diff = numpy.zeros( (len(num_samples), num_times) )

    if not discrete:
        T, true_mi, external_mi =  gen_correlated_data(num_samples[-1], rho)
        cctypes = ['continuous']*2
    else:
        T, true_mi, external_mi =  gen_correlated_data_discrete(num_samples[-1], rho)
        cctypes = ['multinomial']*2

    data_subs = []

    n_index = 0
    for n in num_samples:
        T_sub = numpy.copy(T[0:n-1,:])
        
        data = []

        data_subs.append(T_sub)

        print("%i: " % n)
        for t in range(num_times):
            M_c = du.gen_M_c_from_T(T_sub,cctypes)
            state = State.p_State(M_c, T_sub)
            state.transition(n_steps=burn_in)
            X_D = state.get_X_D()
            X_L = state.get_X_L()

            MI, Linfoot = iu.mutual_information(M_c, [X_L], [X_D], [(0,1)], n_samples=5000)

            mi_diff[n_index,t] = true_mi-MI[0][0]

            print("\t%i TRUE: %e, EST: %e " % (t, true_mi, MI[0][0]) )

            MIs[t,n_index] = MI[0][0]

        n_index += 1


    if discrete:
        dtype_str = "discrete"
    else:
        dtype_str = "continuous"

    basefilename = filename + str(int(time.time()))
    figname = basefilename + ".png"
    datname = basefilename + "_DATA.png"


    # plot data
    # pl.subplot(1,2,1)
    pl.figure(tight_layout=True,figsize=(len(data_subs)*4,4))
    i = 0
    for T_s in data_subs:
        pl.subplot(1,len(data_subs),i+1)
        num_rows = num_samples[i]
        if discrete:
            heatmap, xedges, yedges = numpy.histogram2d(T_s[:,0], T_s[:,1], bins=10)
            extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
            pl.imshow(heatmap, extent=extent, interpolation="nearest")
        else:
            pl.scatter(T_s[:,0], T_s[:,1], alpha=.3, s=81)
        pl.title('#r: '+str(num_rows))

        i += 1

    pl.suptitle("data for rho: %1.2f (%s)" % (rho, dtype_str) )

    pl.savefig(datname)
    pl.clf()

    pl.figure(tight_layout=True,figsize=(5,4))
    # plot convergence
    # pl.subplot(1,2,2)
    # standard deviation
    stderr = numpy.std(MIs,axis=0)#/(float(num_times)**.5)
    mean = numpy.mean(MIs,axis=0)
    pl.errorbar(num_samples,mean,yerr=stderr,c='blue')
    pl.plot(num_samples, mean, c="blue", alpha=.8, label='mean MI')
    pl.plot(num_samples, [true_mi]*len(num_samples), color='red', alpha=.8, label='true MI');
    pl.plot(num_samples, [external_mi]*len(num_samples), color=(0,.5,.5), alpha=.8, label='external MI');
    pl.title('convergence')
    pl.xlabel('#rows in X (log)')
    pl.ylabel('CrossCat MI - true MI')

    pl.legend(loc=0,prop={'size':8})
    pl.gca().set_xscale('log')

    # save output
    pl.title("convergence rho: %1.2f (%s)" % (rho, dtype_str) )

    pl.savefig(figname)
Ejemplo n.º 43
0
def mixing(flmlname):

    print "\n********** Calculating the mixing diagnostics\n"
    # warn user about assumptions
    print "Background potential energy calculation makes two assumptions: \n i) domain height = 0.1m \n ii) initial temperature difference is 1.0 degC"
    domainheight = 0.1
    rho_zero, T_zero, alpha, g = le_tools.Getconstantsfromflml(flmlname)

    # get mixing bin bounds and remove lower bound (=-\infty)
    bounds = le_tools.Getmixingbinboundsfromflml(flmlname)[1:]

    # find indicies of selected bounds for plotting
    index_plot = []
    for b in [-0.5, -0.25, 0.0, 0.25, 0.5]:
        index_plot.append(
            numpy.flatnonzero(numpy.array([abs(val - b)
                                           for val in bounds]) < 1E-6)[0])

    time = []
    volume_fraction = []
    reference_state = []
    bpe = []

    # get stat files
    # time_index_end used to ensure don't repeat values
    stat_files, time_index_end = le_tools.GetstatFiles('./')
    for i in range(len(stat_files)):
        stat = stat_parser(stat_files[i])
        for j in range(time_index_end[i]):
            time.append(stat['ElapsedTime']['value'][j])
            bins = stat['fluid']['Temperature']['mixing_bins%cv_normalised'][:,
                                                                             j]
            # rearrange bins so have nobins = nobounds -1
            # amounts to including any undershoot or overshoots in lower/upper most bin
            # for discussion of impacts see H. Hiester, PhD thesis (2011), chapter 4.
            bins[1] = bins[0] + bins[1]
            bins[-2] = bins[-2] + bins[-1]
            bins = bins[1:-1]
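            # e.g. bins = [under, b1, ..., bn, over] becomes
            #      [b1 + under, b2, ..., bn + over], so the number of bins
            #      matches len(bounds) - 1 and no volume is lost.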

            # sum up bins for plot
            volume_fraction.append(
                tuple([
                    sum(bins[index_plot[k]:index_plot[k + 1]])
                    for k in range(len(index_plot) - 1)
                ]))

            # get reference state using method of Tseng and Ferziger 2001
            Abins = sum([
                bins[k] * (bounds[k + 1] - bounds[k]) for k in range(len(bins))
            ])
            pdf = [val / Abins for val in bins]
            rs = [0]
            for k in range(len(pdf)):
                rs.append(rs[-1] + (domainheight * pdf[k] *
                                    (bounds[k + 1] - bounds[k])))
            reference_state.append(tuple(rs))
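            # rs is the cumulative distribution of the temperature pdf
            # rescaled to the domain height, so it increases monotonically
            # from z* = 0 at the lowest bound to z* = domainheight at the top.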

            # get background potential energy,
            # noting \rho = \rho_zero(1-\alpha(T-T_zero))
            # and reference state is based on temperature
            # bpe_bckgd = 0.5*(g*rho_zero*(1.0+(alpha*T_zero)))*(domainheight**2)
            # but don't include this as will look at difference over time
            bpe.append(-rho_zero * alpha * g * scipy.integrate.trapz(
                x=reference_state[-1],
                y=[
                    bounds[j] * reference_state[-1][j]
                    for j in range(len(reference_state[-1]))
                ]))

    volume_fraction = numpy.array(volume_fraction)
    reference_state = numpy.array(reference_state)

    bpe_zero = bpe[0]
    bpe = [val - bpe_zero for val in bpe]

    # plot
    fs = 18
    pylab.figure(num=2, figsize=(16.5, 11.5))
    pylab.suptitle('Mixing', fontsize=fs)

    # volume fraction
    pylab.subplot(221)
    pylab.plot(time, volume_fraction[:, 0], label='$T < -0.25$', color='k')
    pylab.plot(time,
               volume_fraction[:, 1],
               label='$-0.25 < T < 0.0$',
               color='g')
    pylab.plot(time,
               volume_fraction[:, 2],
               label='$0.0 < T < 0.25$',
               color='b')
    pylab.plot(time, volume_fraction[:, 3], label='$0.25 < T$', color='0.5')

    pylab.axis([0, time[-1], 0, 0.5])
    pylab.legend(loc=0)
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$V/|\\Omega|$', fontsize=fs)
    pylab.title('Volume fraction', fontsize=fs)

    # reference state contours
    pylab.subplot(222)
    for i in index_plot:
        pylab.plot(time, reference_state[:, i], color='k')
    pylab.text(
        time[-1] / 100,
        1.5E-3,
        'From bottom to top contours correspond to values \n $T = -0.5, \, -0.25, \, 0.0, \, 0.25, \, 0.5$ \nwhere the values for $T=-0.5$ and $0.5$ take the values\n$z_* = 0.0$ and $0.1$ respectively',
        bbox=dict(facecolor='white', edgecolor='black'))
    pylab.axis([0, time[-1], 0, domainheight])
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$z_*$ (m)', fontsize=fs)
    pylab.title('Reference state', fontsize=fs)

    pylab.subplot(223)
    pylab.plot(bounds, reference_state[-1], color='k')
    pylab.grid('on')
    pylab.axis([-0.5, 0.5, 0, domainheight])
    pylab.xlabel('$T$ ($^\\circ$C)', fontsize=fs)
    pylab.ylabel('$z_*$ (m)', fontsize=fs)
    pylab.title('Reference state at $t=' + str(time[-1]) + '\\,$s',
                fontsize=fs)

    pylab.subplot(224)
    pylab.plot(time, bpe, color='k')
    pylab.grid('on')
    pylab.gca().set_xlim(0.0, time[-1])
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$\\Delta E_b$', fontsize=fs - 2)
    pylab.gca().get_yaxis().set_major_formatter(
        pylab.FormatStrFormatter('%1.1e'))
    pylab.title('Background potential energy', fontsize=fs)

    pylab.savefig('diagnostics/plots/mixing.png')
    return
Ejemplo n.º 44
0
    # Train
    clf = DecisionTreeClassifier().fit(X, y)

    # Plot the decision boundary
    pl.subplot(2, 3, pairidx + 1)

    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))

    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = pl.contourf(xx, yy, Z)

    pl.xlabel(iris.feature_names[pair[0]])
    pl.ylabel(iris.feature_names[pair[1]])
    pl.axis("tight")

    # Plot the training points
    for i, color in zip(xrange(n_classes), plot_colors):
        idx = np.where(y == i)
        pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i])

    pl.axis("tight")

pl.suptitle("Decision surface of a decision tree using paired features")
pl.legend()
pl.show()
Ejemplo n.º 45
0
def plotObservationsComposite(obs, map, scale_len, title, file_name):
    pylab.figure(figsize=(10, 8))
    pylab.axes((0, 0, 1, 0.95))
    colors = ['r', 'g', 'b', 'c', 'm', '#660099', '#ff9900', '#006666']

    ob_ids = np.unique(obs['id'])
    ttu_label = False
    asos_label = False
    sndg_label = False

    for ob_id in ob_ids:
        ob_idxs = np.where(obs['id'] == ob_id)[0]
        these_obs = obs[ob_idxs]
        ob_xs, ob_ys = map(these_obs['longitude'], these_obs['latitude'])

        if ob_id[0] == "P":
            ob_num = int(ob_id[1]) - 1
            pylab.plot(ob_xs,
                       ob_ys,
                       'o',
                       mfc=colors[ob_num],
                       mec=colors[ob_num],
                       ms=3,
                       label="NSSL MM (%s)" % ob_id)
        elif ob_id[0] == "K":
            if not asos_label:
                label = "ASOS"
                asos_label = True
            else:
                label = None

            pylab.plot(ob_xs[0], ob_ys[0], 'k*', ms=5, label=label)
        elif ob_id[0] == "1" or ob_id[0] == "2":
            if not ttu_label:
                label = "TTU Sticknet"
                ttu_label = True
            else:
                label = None

            pylab.plot(ob_xs[0],
                       ob_ys[0],
                       'o',
                       mfc="#999999",
                       mec="#999999",
                       ms=3,
                       label=label)
        elif these_obs[0]['obtype'] == "SNDG":
            if not sndg_label:
                label = "Sounding"
                sndg_label = True
            else:
                label = None

            pylab.plot(ob_xs[0], ob_ys[0], 'k^', ms=4, label=label)

    drawPolitical(map, scale_len=scale_len)

    line_objects = [
        l for l in sorted(pylab.gca().lines, key=lambda x: x.get_label())
        if l.get_label()[0] != "_"
    ]
    pylab.legend(line_objects, [l.get_label() for l in line_objects],
                 loc=2,
                 numpoints=1,
                 prop={'size': 'medium'})
    pylab.suptitle(title)
    pylab.savefig(file_name)
    pylab.close()
    return
Ejemplo n.º 46
0
def Doplots_diurnal(mypathforResults, PlottingDF, Site_ID):

    print "Doing diurnal plot for month "
    #Do diurnal plots for six representative months (1, 3, 5, 7, 9, 11)
    #create an X axis series for all 24 hours
    t = np.arange(1, 25, 1)

    item_list = ['Fre_Con', 'Fc_ustar', 'GPP_Con', 'Fc_Con', 'Fc_Lasslop']
    Plottemp = PlottingDF[item_list]

    #figure(1)
    pl.figure(1, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k')
    pl.subplot(321)
    pl.title('Diurnal partitioning month = 1')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 1)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 1)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 1)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 1)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.subplot(322)
    pl.title('Diurnal partitioning month = 3')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 3)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 3)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 3)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 3)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.subplot(323)
    pl.title('Diurnal partitioning month = 5')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 5)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 5)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 5)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 5)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.subplot(324)
    pl.title('Diurnal partitioning month = 7')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 7)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 7)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 7)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 7)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.subplot(325)
    pl.title('Diurnal partitioning month = 9')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 9)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 9)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 9)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 9)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.subplot(326)
    pl.title('Diurnal partitioning month = 11')
    try:
        xdata1a = Plottemp[(
            PlottingDF.index.month == 11)][item_list[0]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1a = True
    except:
        plotxdata1a = False
    try:
        xdata1b = Plottemp[(
            PlottingDF.index.month == 11)][item_list[1]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1b = True
    except:
        plotxdata1b = False
    try:
        xdata1c = Plottemp[(
            PlottingDF.index.month == 11)][item_list[2]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1c = True
    except:
        plotxdata1c = False
    try:
        xdata1d = Plottemp[(
            PlottingDF.index.month == 11)][item_list[3]].groupby(
                [lambda x: x.hour]).mean()
        plotxdata1d = True
    except:
        plotxdata1d = False

    if plotxdata1a == True:
        pl.plot(t, xdata1a, 'r', label=str(item_list[0]))
    if plotxdata1b == True:
        pl.plot(t, xdata1b, 'b', label=str(item_list[1]))
    if plotxdata1c == True:
        pl.plot(t, xdata1c, 'k', label=str(item_list[2]))
    if plotxdata1d == True:
        pl.plot(t, xdata1d, 'g', label=str(item_list[3]))
    pl.ylabel('Flux')
    pl.legend(shadow=True, fancybox=True, loc='best')

    pl.figure(1)
    pl.suptitle('Carbon partitioning ensemble diurnal average at ' + Site_ID)
    pl.subplots_adjust(top=0.85)
    pl.tight_layout()
    pl.savefig(mypathforResults + '/ANN ensemble diurnal average at ' +
               Site_ID)
    #pl.show()
    pl.close()
    time.sleep(1)
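
A compact sketch of the same diurnal-averaging pattern, assuming PlottingDF is
a pandas DataFrame with a DatetimeIndex (the function name below is made up;
this is not a drop-in replacement, just the same group-by-hour idea without
the repeated per-month try/except blocks):

import pylab as pl


def doplots_diurnal_compact(plotting_df, site_id):
    item_list = ['Fre_Con', 'Fc_ustar', 'GPP_Con', 'Fc_Con']
    colours = dict(zip(item_list, ['r', 'b', 'k', 'g']))

    pl.figure(figsize=(16, 12))
    for panel, month in enumerate([1, 3, 5, 7, 9, 11], start=1):
        pl.subplot(3, 2, panel)
        pl.title('Diurnal partitioning month = %d' % month)
        sub = plotting_df[plotting_df.index.month == month]
        cols = [c for c in item_list if c in sub.columns]
        means = sub[cols].groupby(sub.index.hour).mean()
        for col in means.columns:
            # hours 0-23 shifted to 1-24 to match the t axis used above
            pl.plot(means.index + 1, means[col], colours[col], label=col)
        pl.ylabel('Flux')
        pl.legend(shadow=True, fancybox=True, loc='best')
    pl.suptitle('Carbon partitioning ensemble diurnal average at ' + site_id)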
Ejemplo n.º 47
0
pl.figure(figsize=(10, 5))

for j, d in enumerate((1, 20)):
    for i, size in enumerate(sizes):
        p = np.polyfit(xtrain[:size], ytrain[:size], d)
        crossval_err[i] = compute_error(xcrossval, ycrossval, p)
        train_err[i] = compute_error(xtrain[:size], ytrain[:size], p)

    ax = pl.subplot(121 + j)
    pl.plot(sizes, crossval_err, lw=2, label='cross-val error')
    pl.plot(sizes, train_err, lw=2, label='training error')
    pl.plot([0, Ntrain], [error, error], '--k', label='intrinsic error')

    pl.xlabel('training set size')
    if j == 0:
        pl.ylabel('rms error')
    else:
        ax.yaxis.set_major_formatter(ticker.NullFormatter())

    pl.legend(loc=4)

    pl.ylim(0.0, 2.5)
    pl.xlim(0, 99)

    pl.text(98, 2.45, 'd = %i' % d, ha='right', va='top', fontsize='large')

pl.subplots_adjust(wspace=0.02, left=0.07, right=0.95)
pl.suptitle('Learning Curves', fontsize=18)

pl.show()
Ejemplo n.º 48
0
def Froudenumber(flmlname):
    print "\n********** Calculating the Froude number\n"
    # warn user about assumptions
    print "Froude number calculation makes three assumptions: \n i) domain height = 0.1m \n ii) mid point of the domain is at x = 0.4 \n iii) initial temperature difference is 1.0 degC"
    domainheight = 0.1
    domainmid = 0.4
    rho_zero, T_zero, alpha, g = le_tools.Getconstantsfromflml(flmlname)
    gprime = rho_zero * alpha * g * 1.0  # this has assumed the initial temperature difference is 1.0 degC
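    # gprime is the reduced gravity implied by the assumed 1.0 degC temperature difference;
    # the front speeds below are non-dimensionalised as the Froude number
    # Fr = U / sqrt(gprime * domainheight).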

    # get list of vtus
    filelist = le_tools.GetFiles('./')
    logs = [
        'diagnostics/logs/time.log', 'diagnostics/logs/X_ns.log',
        'diagnostics/logs/X_fs.log'
    ]
    try:
        # if have extracted information already just use that
        os.stat('diagnostics/logs/time.log')
        os.stat('diagnostics/logs/X_ns.log')
        os.stat('diagnostics/logs/X_fs.log')
        time = le_tools.ReadLog('diagnostics/logs/time.log')
        X_ns = [
            x - domainmid
            for x in le_tools.ReadLog('diagnostics/logs/X_ns.log')
        ]
        X_fs = [
            domainmid - x
            for x in le_tools.ReadLog('diagnostics/logs/X_fs.log')
        ]
    except OSError:
        # otherwise get X_ns and X_fs and t from vtus
        time, X_ns, X_fs = le_tools.GetXandt(filelist)
        f_time = open('./diagnostics/logs/time.log', 'w')
        for t in time:
            f_time.write(str(t) + '\n')
        f_time.close()
        f_X_ns = open('./diagnostics/logs/X_ns.log', 'w')
        for X in X_ns:
            f_X_ns.write(str(X) + '\n')
        f_X_ns.close()
        f_X_fs = open('./diagnostics/logs/X_fs.log', 'w')
        for X in X_fs:
            f_X_fs.write(str(X) + '\n')
        f_X_fs.close()

        # shift so both X_ns and X_fs are the distance of the front
        # from its initial position (the mid point of the domain)
        X_ns = [x - domainmid for x in X_ns]
        X_fs = [domainmid - x for x in X_fs]

    # Calculate U_ns and U_fs from X_ns, X_fs and t
    U_ns = le_tools.GetU(time, X_ns)
    U_fs = le_tools.GetU(time, X_fs)
    U_average = [[], []]

    # If possible average
    # (if fronts have not travelled far enough then will not average)
    start_val, end_val, average_flag_ns = le_tools.GetAverageRange(
        X_ns, 0.2, domainheight)
    if average_flag_ns == True:
        U_average[0].append(pylab.average(U_ns[start_val:end_val]))

    start_val, end_val, average_flag_fs = le_tools.GetAverageRange(
        X_fs, 0.25, domainheight)
    if average_flag_fs == True:
        U_average[1].append(pylab.average(U_fs[start_val:end_val]))

    # plot
    fs = 18
    pylab.figure(num=1, figsize=(16.5, 11.5))
    pylab.suptitle('Front speed', fontsize=fs)

    pylab.subplot(221)
    pylab.plot(time, X_ns, color='k')
    pylab.axis([0, 45, 0, 0.4])
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$X$ (m)', fontsize=fs)
    pylab.title('no-slip', fontsize=fs)

    pylab.subplot(222)
    pylab.plot([x / domainheight for x in X_ns],
               [U / math.sqrt(gprime * domainheight) for U in U_ns],
               color='k')
    pylab.axis([0, 4, 0, 0.6])
    pylab.grid('on')
    pylab.axhline(0.406, color='k')
    pylab.axhline(0.432, color='k')
    pylab.text(3.95,
               0.396,
               'Hartel 2000',
               bbox=dict(facecolor='white', edgecolor='black'),
               va='top',
               ha='right')
    pylab.text(3.95,
               0.442,
               'Simpson 1979',
               bbox=dict(facecolor='white', edgecolor='black'),
               ha='right')
    pylab.xlabel('$X/H$', fontsize=fs)
    pylab.ylabel('$Fr$', fontsize=fs)
    pylab.title('no-slip', fontsize=fs)
    if average_flag_ns == True:
        pylab.axvline(2.0, color='k')
        pylab.axvline(3.0, color='k')
        pylab.text(
            0.05,
            0.01,
            'Average Fr = ' + '{0:.2f}'.format(
                U_average[0][0] / math.sqrt(gprime * domainheight)) +
            '\nvertical lines indicate the range \nover which the average is taken',
            bbox=dict(facecolor='white', edgecolor='black'))

    pylab.subplot(223)
    pylab.plot(time, X_fs, color='k')
    pylab.axis([0, 45, 0, 0.4])
    pylab.grid('on')
    pylab.xlabel('$t$ (s)', fontsize=fs)
    pylab.ylabel('$X$ (m)', fontsize=fs)
    pylab.title('free-slip', fontsize=fs)

    pylab.subplot(224)
    pylab.plot([x / domainheight for x in X_fs],
               [U / math.sqrt(gprime * domainheight) for U in U_fs],
               color='k')
    pylab.axis([0, 4, 0, 0.6])
    pylab.grid('on')
    pylab.axhline(0.477, color='k')
    pylab.text(3.95,
               0.467,
               'Hartel 2000',
               va='top',
               bbox=dict(facecolor='white', edgecolor='black'),
               ha='right')
    pylab.xlabel('$X/H$', fontsize=fs)
    pylab.ylabel('$Fr$', fontsize=fs)
    pylab.title('free-slip', fontsize=fs)
    if average_flag_fs == True:
        pylab.text(
            0.05,
            0.01,
            'Average Fr  = ' + '{0:.2f}'.format(
                U_average[1][0] / math.sqrt(gprime * domainheight)) +
            '\nvertical lines indicate the range \nover which the average is taken',
            bbox=dict(facecolor='white', edgecolor='black'))
        pylab.axvline(2.5, color='k')
        pylab.axvline(3.0, color='k')

    pylab.savefig('diagnostics/plots/front_speed.png')
    return
Ejemplo n.º 49
0
# Encoding the dependent Variable
from sklearn.preprocessing import LabelEncoder
labelencoder_Y = LabelEncoder()
y = labelencoder_Y.fit_transform(y)


#Rescale data (between 0 and 1)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
rescaledX = scaler.fit_transform(X)
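# MinMaxScaler with feature_range=(0, 1) maps each feature column independently to
# x' = (x - x_min) / (x_max - x_min), so all inputs share a common [0, 1] scale.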


import pylab as pl
dataset.drop('Damage',axis=1).hist(bins=30, figsize=(20,20))
pl.suptitle("Histogram for each numeric input variable")
pl.savefig('Damage_hist')
pl.show()

#Apply kernel to transform the data to a higher dimension
kernels = ['Polynomial', 'RBF', 'Sigmoid','Linear']
#A function which returns the corresponding SVC model
def getClassifier(ktype):
    if ktype == 0:
        # Polynomial kernel
        return SVC(kernel='poly', degree=8, gamma="auto")
    elif ktype == 1:
        # Radial Basis Function kernel
        return SVC(kernel='rbf', gamma="auto")
    elif ktype == 2:
        # Sigmoid kernel
        return SVC(kernel='sigmoid', gamma="auto")
    elif ktype == 3:
        # Linear kernel
        return SVC(kernel='linear', gamma="auto")
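
# A minimal usage sketch (an assumption, not part of the original snippet): it presumes
# SVC was imported from sklearn.svm for getClassifier, fits one model per kernel on the
# rescaled features, and reports hold-out accuracy. Variable names below are illustrative.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = train_test_split(
    rescaledX, y, test_size=0.2, random_state=0)
for ktype, name in enumerate(kernels):
    clf = getClassifier(ktype)
    clf.fit(X_train, y_train)
    print(name, 'accuracy:', accuracy_score(y_test, clf.predict(X_test)))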
Ejemplo n.º 50
0
def plotSoundingObservationsComposite(obs,
                                      map,
                                      scale_len,
                                      dimensions,
                                      title,
                                      file_name,
                                      radars=None):
    figure = pylab.figure(figsize=(10, 8))
    x_size, y_size = dimensions

    grid_spec = GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[1, 3])

    colors = [
        'r', 'g', 'b', 'c', 'm', '#660099', '#006666', '#99ff00', '#ff9900',
        '#666666'
    ]
    names = [
        "Sounding (NSSL)", "Sounding (NCAR)", "Sounding (NCAR)",
        "Sounding (NCAR)", "Sounding (NSSL)", "Sounding (NSSL)"
    ]
    ob_ids = np.unique(obs['id'])  # np.unique1d was removed from numpy; np.unique is equivalent

    bottom = 0.075
    top = 0.95
    stretch_fac = (1 - top + bottom) / 2. - 0.011

    pylab.subplots_adjust(left=0.1 + stretch_fac,
                          bottom=bottom,
                          right=0.9 - stretch_fac,
                          top=top,
                          hspace=0.075,
                          wspace=0.075)

    ax_xy = figure.add_subplot(grid_spec[2])
    ax_xz = figure.add_subplot(grid_spec[0], sharex=ax_xy)
    ax_yz = figure.add_subplot(grid_spec[3], sharey=ax_xy)

    def labelMe(do_label, label):
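        # Returns (False, label) on the first call for an observation type and
        # (False, None) afterwards; the caller stores the returned flag, so each
        # type contributes exactly one legend entry.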
        if do_label:
            return not do_label, label
        else:
            return do_label, None

    color_num = 0
    prof_label = True
    sfc_label = True
    asos_label = True
    ttu_label = True

    for ob_id in ob_ids:
        plot_vertical = False
        ob_idxs = np.where(obs['id'] == ob_id)[0]
        these_obs = obs[ob_idxs]
        ob_xs, ob_ys = map(these_obs['longitude'], these_obs['latitude'])

        if np.any(ob_xs >= 0) and np.any(ob_xs <= x_size) and np.any(
                ob_ys >= 0) and np.any(ob_ys <= y_size):
            if these_obs['obtype'][0] == "PROF":
                prof_label, label = labelMe(prof_label, "Profiler")
                color = "#ff9900"
                marker = 'o'
                ms = 3
                plot_vertical = True
            elif these_obs['obtype'][0] == "SNDG":
                color = colors[color_num]
                label = "Sounding"  # % ob_id
                marker = 'o'
                ms = 3
                plot_vertical = True
                color_num += 1
            elif ob_id[0] == "P":
                color = "#999999"
                sfc_label, label = labelMe(sfc_label, "Sounding")
                marker = 'o'
                ms = 3
            elif ob_id[0] == "K":
                asos_label, label = labelMe(asos_label, "ASOS")
                color = 'k'
                marker = '*'
                ms = 5
            elif ob_id[0] == "1" or ob_id[0] == "2":
                ttu_label, label = labelMe(ttu_label, "TTU Sticknet")
                color = "#003300"
                marker = 'o'
                ms = 3

            ax_xy.plot(ob_xs,
                       ob_ys,
                       marker,
                       mfc=color,
                       mec=color,
                       ms=ms,
                       label=label)[0]
            if plot_vertical:
                ax_xz.plot(ob_xs,
                           these_obs['elevation'] / 1000.,
                           marker,
                           mfc=color,
                           mec=color,
                           ms=ms)
                ax_yz.plot(these_obs['elevation'] / 1000.,
                           ob_ys,
                           marker,
                           mfc=color,
                           mec=color,
                           ms=ms)

    dummy, zlim = ax_xz.get_ylim()

    for ob_id in ob_ids:
        ob_idxs = np.where(obs['id'] == ob_id)[0]
        these_obs = obs[ob_idxs]
        ob_xs, ob_ys = map(these_obs['longitude'], these_obs['latitude'])

        plot_vertical = False

        if np.any(ob_xs >= 0) and np.any(ob_xs <= x_size) and np.any(
                ob_ys >= 0) and np.any(ob_ys <= y_size):
            if these_obs['obtype'][0] == "PROF":
                color = "#ff9900"
                plot_vertical = True
            elif these_obs['obtype'][0] == "SNDG":
                color = "#999999"
                plot_vertical = True

            if plot_vertical:
                ax_xy.plot(ob_xs[0], ob_ys[0], 'x', color=color, ms=6)
                ax_xz.plot([ob_xs[0], ob_xs[0]], [0, zlim],
                           '--',
                           color=color,
                           lw=0.5)
                ax_yz.plot([0, zlim], [ob_ys[0], ob_ys[0]],
                           '--',
                           color=color,
                           lw=0.5)

    if radars:
        rad_label = True
        for radar in radars:
            rad_label, label = labelMe(rad_label, "Radar")
            (lat, lon), radar_range = radar  # avoid shadowing the builtin 'range'
            radar_x, radar_y = map(lon, lat)
            ax_xy.plot(radar_x, radar_y, 'b<', ms=4, label=label)
            ax_xy.add_patch(
                Circle((radar_x, radar_y), radar_range, fc='none', ec='k'))

    ax_xz.set_ylim(0, zlim)
    ax_yz.set_xlim(0, zlim)

    pylab.sca(ax_xy)
    drawPolitical(map, scale_len=scale_len)

    ax_xz.set_xlabel("x")
    ax_xz.set_ylabel("z (km)")
    ax_yz.set_xlabel("z (km)")
    ax_yz.set_ylabel("y", rotation=-90)

    line_objects = [
        l for l in sorted(ax_xy.lines, key=lambda x: x.get_label())
        if l.get_label()[0] != "_"
    ]
    ax_xy.legend(line_objects, [l.get_label() for l in line_objects],
                 loc=1,
                 numpoints=1,
                 ncol=2,
                 prop={'size': 'medium'},
                 bbox_to_anchor=(1.28, 1.07, 0.33, 0.33),
                 handletextpad=0,
                 columnspacing=0)

    pylab.suptitle(title)
    pylab.savefig(file_name)
    pylab.close()
    return
Ejemplo n.º 51
0
    def run(self, xrange="log", diff="relative"):
        r"""
        Compare accuracy of different methods for computing f.

        *xrange* is::

            log:    [10^-3,10^5]
            logq:   [10^-4, 10^1]
            linear: [1,1000]
            zoom:   [1000,1010]
            neg:    [-100,100]

        For arbitrary range use "start:stop:steps:scale" where scale is
        one of log, lin, or linear.

        *diff* is "relative", "absolute" or "none"

        *x_bits* is the precision with which the x values are specified.  The
        default 23 should reproduce the equivalent of a single precision
        calculation.
        """
        linear = not xrange.startswith("log")
        if xrange == "zoom":
            start, stop, steps = 1000, 1010, 2000
        elif xrange == "neg":
            start, stop, steps = -100.1, 100.1, 2000
        elif xrange == "linear":
            start, stop, steps = 1, 1000, 2000
            start, stop, steps = 0.001, 2, 2000
        elif xrange == "log":
            start, stop, steps = -3, 5, 400
        elif xrange == "logq":
            start, stop, steps = -4, 1, 400
        elif ':' in xrange:
            parts = xrange.split(':')
            linear = parts[3] != "log" if len(parts) == 4 else True
            steps = int(parts[2]) if len(parts) > 2 else 400
            start = float(parts[0])
            stop = float(parts[1])

        else:
            raise ValueError("unknown range " + xrange)
        with mp.workprec(500):
            # Note: we make sure that we are comparing apples to apples...
            # The x points are set using single precision so that we are
            # examining the accuracy of the transformation from x to f(x)
            # rather than x to f(nearest(x)) where nearest(x) is the nearest
            # value to x in the given precision.
            if linear:
                start = max(start, self.limits[0])
                stop = min(stop, self.limits[1])
                qrf = np.linspace(start, stop, steps, dtype='single')
                #qrf = np.linspace(start, stop, steps, dtype='double')
                qr = [mp.mpf(float(v)) for v in qrf]
                #qr = mp.linspace(start, stop, steps)
            else:
                start = np.log10(max(10**start, self.limits[0]))
                stop = np.log10(min(10**stop, self.limits[1]))
                qrf = np.logspace(start, stop, steps, dtype='single')
                #qrf = np.logspace(start, stop, steps, dtype='double')
                qr = [mp.mpf(float(v)) for v in qrf]
                #qr = [10**v for v in mp.linspace(start, stop, steps)]

        target = self.call_mpmath(qr, bits=500)
        pylab.subplot(121)
        self.compare(qr, 'single', target, linear, diff)
        pylab.legend(loc='best')
        pylab.subplot(122)
        self.compare(qr, 'double', target, linear, diff)
        pylab.legend(loc='best')
        pylab.suptitle(self.name + " compared to 500-bit mpmath")
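        # Usage sketch (hypothetical instance name): comp.run(xrange="-3:2:500:log")
        # sweeps 500 log-spaced points; note that with a log scale the custom start/stop
        # values are treated as base-10 exponents, as in the named "log"/"logq" ranges.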
Ejemplo n.º 52
0
    # lvl = logging.DEBUG
    # logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)

    # Optimize the model.
    for step in range(50):
        print('Tractor params:')
        tractor.printThawedParams()
        dlnp,X,alpha = tractor.optimize(damp=1.)
        print('dlnp', dlnp)
        print('galaxy:', moggal)
        #print('Mog', moggal.mog.getParams())
        if dlnp == 0:
            break

        # Plot the model as we're optimizing...
        mod = tractor.getModelImage(0)
        chi = (tim.getImage() - mod) * tim.getInvError()
        plt.clf()
        plt.subplot(1,2,1)
        plt.imshow(mod, interpolation='nearest', origin='lower')
        plt.title('Model')
        plt.subplot(1,2,2)
        mx = np.abs(chi).max()
        plt.imshow(chi, interpolation='nearest', origin='lower',
                   vmin=-mx, vmax=mx)
        plt.colorbar()
        plt.title('Chi residuals')
        plt.suptitle('MoG model after optimization step %i' % step)
        plt.savefig('mod-o%02i.png' % step)
        
Ejemplo n.º 53
0
def _plot_mods(tims,
               mods,
               blobwcs,
               titles,
               bands,
               coimgs,
               cons,
               bslc,
               blobw,
               blobh,
               ps,
               chi_plots=True,
               rgb_plots=False,
               main_plot=True,
               rgb_format='%s'):
    import numpy as np

    subims = [[] for m in mods]
    chis = dict([(b, []) for b in bands])

    make_coimgs = (coimgs is None)
    if make_coimgs:
        print('_plot_mods: blob shape', (blobh, blobw))
        coimgs = [np.zeros((blobh, blobw)) for b in bands]
        cons = [np.zeros((blobh, blobw)) for b in bands]
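        # The per-band coadds are built by accumulating resampled tim pixels below and
        # normalising by the per-pixel contribution count (cons) after each band's loop.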

    for iband, band in enumerate(bands):
        comods = [np.zeros((blobh, blobw)) for m in mods]
        cochis = [np.zeros((blobh, blobw)) for m in mods]
        comodn = np.zeros((blobh, blobw))
        mn, mx = 0, 0
        sig1 = 1.
        for itim, tim in enumerate(tims):
            if tim.band != band:
                continue
            R = tim_get_resamp(tim, blobwcs)
            if R is None:
                continue
            (Yo, Xo, Yi, Xi) = R

            rechi = np.zeros((blobh, blobw))
            chilist = []
            comodn[Yo, Xo] += 1
            for imod, mod in enumerate(mods):
                chi = ((tim.getImage()[Yi, Xi] - mod[itim][Yi, Xi]) *
                       tim.getInvError()[Yi, Xi])
                rechi[Yo, Xo] = chi
                chilist.append((rechi.copy(), itim))
                cochis[imod][Yo, Xo] += chi
                comods[imod][Yo, Xo] += mod[itim][Yi, Xi]
            chis[band].append(chilist)
            # we'll use 'sig1' of the last tim in the list below...
            mn, mx = -10. * tim.sig1, 30. * tim.sig1
            sig1 = tim.sig1
            if make_coimgs:
                nn = (tim.getInvError()[Yi, Xi] > 0)
                coimgs[iband][Yo, Xo] += tim.getImage()[Yi, Xi] * nn
                cons[iband][Yo, Xo] += nn

        if make_coimgs:
            coimgs[iband] /= np.maximum(cons[iband], 1)
            coimg = coimgs[iband]
            coimgn = cons[iband]
        else:
            coimg = coimgs[iband][bslc]
            coimgn = cons[iband][bslc]

        for comod in comods:
            comod /= np.maximum(comodn, 1)
        ima = dict(vmin=mn, vmax=mx, ticks=False)
        resida = dict(vmin=-5. * sig1, vmax=5. * sig1, ticks=False)
        for subim, comod, cochi in zip(subims, comods, cochis):
            subim.append((coimg, coimgn, comod, ima, cochi, resida))

    # Plot per-band image, model, and chi coadds, and RGB images
    rgba = dict(ticks=False)
    rgbs = []
    rgbnames = []
    plt.figure(1)
    for i, subim in enumerate(subims):
        plt.clf()
        rows, cols = 3, 5
        for ib, b in enumerate(bands):
            plt.subplot(rows, cols, ib + 1)
            plt.title(b)
        plt.subplot(rows, cols, 4)
        plt.title('RGB')
        plt.subplot(rows, cols, 5)
        plt.title('RGB(stretch)')

        imgs = []
        themods = []
        resids = []
        for j, (img, imgn, mod, ima, chi, resida) in enumerate(subim):
            imgs.append(img)
            themods.append(mod)
            resid = img - mod
            resid[imgn == 0] = np.nan
            resids.append(resid)

            if main_plot:
                plt.subplot(rows, cols, 1 + j + 0)
                dimshow(img, **ima)
                plt.subplot(rows, cols, 1 + j + cols)
                dimshow(mod, **ima)
                plt.subplot(rows, cols, 1 + j + cols * 2)
                # dimshow(-chi, **imchi)
                # dimshow(imgn, vmin=0, vmax=3)
                dimshow(resid, nancolor='r', **resida)
        rgb = get_rgb(imgs, bands)
        if i == 0:
            rgbs.append(rgb)
            rgbnames.append(rgb_format % 'Image')
        if main_plot:
            plt.subplot(rows, cols, 4)
            dimshow(rgb, **rgba)
        rgb = get_rgb(themods, bands)
        rgbs.append(rgb)
        rgbnames.append(rgb_format % titles[i])
        if main_plot:
            plt.subplot(rows, cols, cols + 4)
            dimshow(rgb, **rgba)
            plt.subplot(rows, cols, cols * 2 + 4)
            dimshow(get_rgb(resids, bands, mnmx=(-10, 10)), **rgba)

            mnmx = -5, 300
            kwa = dict(mnmx=mnmx, arcsinh=1)
            plt.subplot(rows, cols, 5)
            dimshow(get_rgb(imgs, bands, **kwa), **rgba)
            plt.subplot(rows, cols, cols + 5)
            dimshow(get_rgb(themods, bands, **kwa), **rgba)
            plt.subplot(rows, cols, cols * 2 + 5)
            mnmx = -100, 100
            kwa = dict(mnmx=mnmx, arcsinh=1)
            dimshow(get_rgb(resids, bands, **kwa), **rgba)
            plt.suptitle(titles[i])
            ps.savefig()

    if rgb_plots:
        # RGB image and model
        plt.figure(2)
        for rgb, tt in zip(rgbs, rgbnames):
            plt.clf()
            dimshow(rgb, **rgba)
            plt.title(tt)
            ps.savefig()

    if not chi_plots:
        return

    imchi = dict(cmap='RdBu', vmin=-5, vmax=5)

    plt.figure(1)
    # Plot per-image chis: in a grid with band along the rows and images along the cols
    cols = max(len(v) for v in chis.values())
    rows = len(bands)
    for imod in range(len(mods)):
        plt.clf()
        for row, band in enumerate(bands):
            sp0 = 1 + cols * row
            # chis[band] = [ (one for each tim:) [ (one for each mod:) (chi,itim), (chi,itim) ], ...]
            for col, chilist in enumerate(chis[band]):
                chi, itim = chilist[imod]
                plt.subplot(rows, cols, sp0 + col)
                dimshow(-chi, **imchi)
                plt.xticks([])
                plt.yticks([])
                plt.title(tims[itim].name)
        #plt.suptitle(titles[imod])
        ps.savefig()
Ejemplo n.º 54
0
            iou_epochs = np.array(iou_epochs)
            sort_idx = np.argsort(iou_epochs)
            iou_epochs = iou_epochs[sort_idx]
            iou_values = iou_values[sort_idx]

    if os.path.exists(iou2_res_fn):
        with open(iou2_res_fn, 'r') as f:
            # Assumes only one line
            fields = f.read().split('\t')[1:-1]
            iou2_values = [float(x)/100 for x in fields]


    plt.figure(figsize=(10,5))
    plt.subplot(1, 2, 1)
    plt.plot(sim_epochs, sim_values, 'b-o')
    plt.title('Simulation')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(iou_epochs, iou_values, 'r-o')
    plt.plot(range(1, len(iou2_values)+1), iou2_values, 'g-o')
    plt.legend(['Jacquard', 'Cornell'])
    plt.title('IOU > 0.4')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.suptitle('')

    plt.show()

Ejemplo n.º 55
0
            color=cols[inst_ind],
            mec='black',
            zorder=1,
            mew=2,
            ms=5)

    fs, pvals, etas = do_anova(meas, n_reps_bundled, thetas, n_rules=2)
    ax.set_title(
        '%s\nF=[%.2f, %.2f, %.2f]\npval=[%.5f, %.5f, %.5f]\netas=[%.3f, %.3f, %.3f]' %
        (var_names[ind], fs[0], fs[1], fs[2],
         pvals[0], pvals[1], pvals[2], etas[0], etas[1], etas[2]),
        fontsize=8)

axes[1, 2].remove()
axes[1, 1].set_xlabel('Model\'s Theta freq. (Hz)')
pl.tight_layout()
pl.suptitle(
    'Effect of rule difficulty and theta frequency on DDM params, RT and accuracy',
    fontsize=8)
pl.subplots_adjust(left=.09,
                   bottom=.1,
                   right=.97,
                   top=.85,
                   wspace=.28,
                   hspace=.32)

############################################################################
################## 		compute "optimal" theta freq 		################
############################################################################

optim_theta_freq = np.argmax(corr_all_inst[:, :, :].squeeze(),
                             axis=0) * freq_step + thetas[0]
Ejemplo n.º 56
0
            if PLOT and True:
               fig,axes = p.subplots(nrows=3,ncols=1)
               p.subplot(311); p.plot( noise_array_pre_filter[pad][sep][mychan].real,alpha=.5); #p.colorbar();
               p.subplot(311); p.plot( noise_array[pad][sep][mychan].real,'k-'); #p.colorbar(); p.show()
               p.subplot(311); p.plot( noise_array_top_hat[pad][sep][mychan].real,'r-'); #p.colorbar(); p.show()
               p.ylabel('Real')
               p.subplot(312); p.plot( noise_array_pre_filter[pad][sep][mychan].imag,alpha=.5); #p.colorbar();
               p.subplot(312); p.plot( noise_array[pad][sep][mychan].imag,'k-'); #p.colorbar(); p.show()
               p.subplot(312); p.plot( noise_array_top_hat[pad][sep][mychan].imag,'r-'); #p.colorbar(); p.show()
               p.ylabel('Imag')
               p.subplot(313); p.plot( n.angle(noise_array_pre_filter[pad][sep][mychan]),alpha=.5); #p.colorbar();
               p.subplot(313); p.plot( n.angle(noise_array[pad][sep][mychan]),'k-'); #p.colorbar(); p.show()
               p.subplot(313); p.plot( n.angle(noise_array_top_hat[pad][sep][mychan]),'r-'); #p.colorbar(); p.show()
               p.ylabel('Phase')
               p.suptitle('%d_%d'%a.miriad.bl2ij(bl))
            print '[Done]'
    if opts.output: 
        filename = 'fringe_rate_profile_pad{pad}_int{inttime}_chan{chan:d}_pol{pol}.pkl'.format(pad=pad,inttime=opts.inttime,chan=int(mychan),pol=opts.pol)
        print("saving to: {filename}".format(filename=filename))
        f = open(filename,'w')
        frps['chan'] = mychan
        frps['freqs'] = frp_freqs
        frps['frp'] = frp
        frps['firs'] = firs
        frps['timebins'] = timebins
        pickle.dump(frps,f)
        f.close()
if PLOT:
    ax_frp.plot(frp_freqs*1e3,frp,label='frp0',color='black')
    #ax_frp.plot(frp_freqs*1e3,skew([mn,sq,sk],bins),'k--',label='data')
Ejemplo n.º 57
0
a1.plot(lst_r, lst_all1, marker='o', color='k')
a1.plot(lst_r, lst_sf1, marker='o', color='b')

a2.plot(lst_r, lst_q2, marker='o', color='r')
a2.plot(lst_r, lst_all2, marker='o', color='k')
a2.plot(lst_r, lst_sf2, marker='o', color='b')

a3.plot(lst_r, lst_q3, marker='o', color='r')
a3.plot(lst_r, lst_all3, marker='o', color='k')
a3.plot(lst_r, lst_sf3, marker='o', color='b')

a4.plot(lst_r, lst_q4, marker='o', color='r', label='Quiescent Centrals')
a4.plot(lst_r, lst_all4, marker='o', color='k', label='All Centrals')
a4.plot(lst_r, lst_sf4, marker='o', color='b', label='Star-Forming Centrals')

pylab.suptitle('Quiescent Satellite Percentage per Aperture Radius',
               fontsize=17)
fig.text(0.35, 0.01, "Aperture Radius (kpc)", fontsize=18)
fig.text(0.05,
         0.9,
         "Quiescent Satellite Percentage (Q. sat./total sat.)",
         rotation="vertical",
         fontsize=15)

a1.set_title('0.5 < z < 1.0', fontsize=10)
a2.set_title('1.0 < z < 1.5', fontsize=10)
a3.set_title('1.5 < z < 2.0', fontsize=10)
a4.set_title('2.0 < z < 2.5', fontsize=10)

fig.subplots_adjust(wspace=0.1)

a4.legend(loc=1)
Ejemplo n.º 58
0
def tim_plots(tims, bands, ps):
    # Pixel histograms of subimages.
    for b in bands:
        sig1 = np.median([tim.sig1 for tim in tims if tim.band == b])
        plt.clf()
        for tim in tims:
            if tim.band != b:
                continue
            # broaden range to encompass most pixels... only req'd
            # when sky is bad
            lo, hi = -5. * sig1, 5. * sig1
            pix = tim.getImage()[tim.getInvError() > 0]
            lo = min(lo, np.percentile(pix, 5))
            hi = max(hi, np.percentile(pix, 95))
            plt.hist(pix,
                     range=(lo, hi),
                     bins=50,
                     histtype='step',
                     alpha=0.5,
                     label=tim.name)
        plt.legend()
        plt.xlabel('Pixel values')
        plt.title('Pixel distributions: %s band' % b)
        ps.savefig()

        plt.clf()
        lo, hi = -5., 5.
        for tim in tims:
            if tim.band != b:
                continue
            ie = tim.getInvError()
            pix = (tim.getImage() * ie)[ie > 0]
            plt.hist(pix,
                     range=(lo, hi),
                     bins=50,
                     histtype='step',
                     alpha=0.5,
                     label=tim.name)
        plt.legend()
        plt.xlabel('Pixel values (sigma)')
        plt.xlim(lo, hi)
        plt.title('Pixel distributions: %s band' % b)
        ps.savefig()

    # Plot image pixels, invvars, masks
    for tim in tims:
        plt.clf()
        plt.subplot(2, 2, 1)
        dimshow(tim.getImage(), vmin=-3. * tim.sig1, vmax=10. * tim.sig1)
        plt.title('image')
        plt.subplot(2, 2, 2)
        dimshow(tim.getInvError(), vmin=0, vmax=1.1 / tim.sig1)
        plt.title('inverr')
        if tim.dq is not None:
            # plt.subplot(2,2,3)
            # dimshow(tim.dq, vmin=0, vmax=tim.dq.max())
            # plt.title('DQ')
            plt.subplot(2, 2, 3)
            dimshow(((tim.dq & tim.dq_saturation_bits) > 0),
                    vmin=0,
                    vmax=1.5,
                    cmap='hot')
            plt.title('SATUR')
        plt.subplot(2, 2, 4)
        dimshow(tim.getImage() * (tim.getInvError() > 0),
                vmin=-3. * tim.sig1,
                vmax=10. * tim.sig1)
        okpix = tim.getImage()[tim.getInvError() > 0]
        plt.title('image (masked) range [%.3g, %.3g]' %
                  (np.min(okpix), np.max(okpix)))
        plt.suptitle(tim.name)
        ps.savefig()

        if True and tim.dq is not None:
            from legacypipe.bits import DQ_BITS
            plt.clf()
            bitmap = dict([(v, k) for k, v in DQ_BITS.items()])
            k = 1
            for i in range(12):
                bitval = 1 << i
                if not bitval in bitmap:
                    continue
                # only 9 bits are actually used
                plt.subplot(3, 3, k)
                k += 1
                plt.imshow((tim.dq & bitval) > 0, vmin=0, vmax=1.5, cmap='hot')
                plt.title(bitmap[bitval])
            plt.suptitle(
                'Mask planes: %s (%s %s)' %
                (tim.name, tim.imobj.image_filename, tim.imobj.ccdname))
            ps.savefig()

            im = tim.imobj
            if im.camera == 'decam':
                from legacypipe.decam import decam_has_dq_codes
                print(tim.name, ': plver "%s"' % im.plver, 'has DQ codes:',
                      decam_has_dq_codes(im.plver))
            if im.camera == 'decam' and decam_has_dq_codes(im.plver):
                # Integer codes, not bitmask.  Re-read and plot.
                dq = im.read_dq(slice=tim.slice)
                plt.clf()
                plt.subplot(1, 3, 1)
                dimshow(tim.getImage(),
                        vmin=-3. * tim.sig1,
                        vmax=30. * tim.sig1)
                plt.title('image')
                plt.subplot(1, 3, 2)
                dimshow(tim.getInvError(), vmin=0, vmax=1.1 / tim.sig1)
                plt.title('inverr')
                plt.subplot(1, 3, 3)
                plt.imshow(dq,
                           interpolation='nearest',
                           origin='lower',
                           cmap='tab10',
                           vmin=-0.5,
                           vmax=9.5)
                plt.colorbar()
                plt.title('DQ codes')
                plt.suptitle(
                    '%s (%s %s) PLVER %s' %
                    (tim.name, im.image_filename, im.ccdname, im.plver))
                ps.savefig()
Ejemplo n.º 59
0
	pl.plot(z_after,  (yp[ii+1, :]-yp[0, :]), '.r', label='HT', markersize = 2)
	pl.plot(z_after,  (yp_after-yp_before), '.b', label='PyHT', markersize = 2)
	ms.sciy()
	pl.grid('on')
	pl.ylabel("$\Delta$y'")
	pl.xlabel('z[m]')
	pl.legend(prop = {'size':14})
	pl.subplot(2,3,6)
	pl.hist((100*np.abs((yp_after-yp_before)-(yp[ii+1, :]-yp[0, :]))/np.std(yp[ii+1, :]-yp[0, :])), bins=100, range=(0,100))
	pl.xlim(0,100)
	pl.xlabel('Error_y [%]')
	pl.ylabel('Occurrences')
	rms_err_y = np.std(100*np.abs((yp_after-yp_before)-(yp[ii+1, :]-yp[0, :]))/np.std(yp[ii+1, :]-yp[0, :]))


	pl.suptitle('Turn %d rms_err_x = %e rms_err_y = %e'%(ii, rms_err_x, rms_err_y))

	pl.savefig(filename.split('_prb.dat')[0]+'_%02d.png'%ii, dpi=150)

	rms_err_x_list.append(rms_err_x)
	rms_err_y_list.append(rms_err_y)
	
	pl.ion()
	pl.draw()
	time.sleep(1.)
	
pl.figure(1000)
pl.plot(rms_err_x_list, '.-', markersize = 10, linewidth=2, label='x')
pl.plot(rms_err_y_list, '.-', markersize = 10, linewidth=2, label='y')
pl.grid('on')
pl.ylabel('''Relative r.m.s. error [%]''')
Ejemplo n.º 60
0
plt.figure()
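
# moving_average is not defined in this snippet; a minimal boxcar-smoothing sketch under
# that assumption (the real helper and its edge handling may differ):
import numpy as np

def moving_average(values, window):
    # Average each sample over a sliding window of fixed length.
    kernel = np.ones(int(window)) / float(window)
    return np.convolve(values, kernel, mode='valid')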

rel_err = moving_average(dg_err - p_err, opts.window)

if opts.split_by_time is not None:
    nplots = int(math.ceil(len(rel_err) / (opts.split_by_time * 60.0 * 60.0)))
    plen = int(opts.split_by_time * 60.0 * 60.0)
    offset_time = opts.split_by_time / 2
else:
    nplots = 1
    plen = len(rel_err)
    offset_time = 0

t_local = int(t_first + (opts.timezone * 60 * 60))

plt.suptitle("Relative error between corrected and uncorrected rovers")

ymin = min(0, min(rel_err) * 1.2)
ymax = max(rel_err) * 1.2

for i in range(nplots):
    ax = plt.subplot(nplots, 1, i + 1)

    tick_pos = range(0, plen, 30*60)
    tick_label = [format_time(gps_to_time(t_local + i * plen + offset_time + x)) for x in tick_pos]
    ax.set_xticks(tick_pos)
    ax.set_xticklabels(tick_label, rotation=45)
    ax.set_xlim(0, plen)
    ax.set_ylim(ymin, ymax)

    dat = rel_err[i * plen: min(len(rel_err), (i+1)*plen)]