def create_lifetime_chart(self, classname, filename=''):
        """
        Create chart that depicts the lifetime of the instance registered with
        `classname`. The output is written to `filename`.
        """
        try:
            from pylab import figure, title, xlabel, ylabel, plot, savefig
        except ImportError:
            return HtmlStats.nopylab_msg % (classname + " lifetime")

        cnt = []
        for tobj in self.index[classname]:
            cnt.append([tobj.birth, 1])
            if tobj.death:
                cnt.append([tobj.death, -1])
        cnt.sort()
        for i in range(1, len(cnt)):
            cnt[i][1] += cnt[i - 1][1]

        x = [t for [t, c] in cnt]
        y = [c for [t, c] in cnt]

        figure()
        xlabel("Execution time [s]")
        ylabel("Instance #")
        title("%s instances" % classname)
        plot(x, y, 'o')
        savefig(filename)

        return self.chart_tag % (os.path.basename(filename))
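The sort-then-prefix-sum above is the whole lifetime algorithm: each birth contributes +1, each death -1, and the running sum is the number of live instances at each timestamp. A standalone sketch with made-up (birth, death) pairs:

# Made-up (birth, death) timestamps; death=None means the instance is still alive.
events = []
for birth, death in [(0.0, 2.5), (0.5, None), (1.0, 1.5)]:
    events.append([birth, 1])
    if death is not None:
        events.append([death, -1])
events.sort()
for i in range(1, len(events)):
    events[i][1] += events[i - 1][1]
print(events)  # [[0.0, 1], [0.5, 2], [1.0, 3], [1.5, 2], [2.5, 1]]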
def check_vpd_ks2_astrometry():
    """
    Check the VPD and quiver plots for our KS2-extracted, re-transformed astrometry.
    """
    catFile = workDir + '20.KS2_PMA/wd1_catalog.fits'
    tab = atpy.Table(catFile)

    good = (tab.xe_160 < 0.05) & (tab.ye_160 < 0.05) & \
        (tab.xe_814 < 0.05) & (tab.ye_814 < 0.05) & \
        (tab.me_814 < 0.05) & (tab.me_160 < 0.05)

    tab2 = tab.where(good)

    dx = (tab2.x_160 - tab2.x_814) * ast.scale['WFC'] * 1e3
    dy = (tab2.y_160 - tab2.y_814) * ast.scale['WFC'] * 1e3

    py.clf()
    q = py.quiver(tab2.x_814, tab2.y_814, dx, dy, scale=5e2)
    py.quiverkey(q, 0.95, 0.85, 5, '5 mas', color='red', labelcolor='red')
    py.savefig(workDir + '20.KS2_PMA/vec_diffs_ks2_all.png')

    py.clf()
    py.plot(dy, dx, 'k.', ms=2)
    lim = 30
    py.axis([-lim, lim, -lim, lim])
    py.xlabel('Y Proper Motion (mas)')
    py.ylabel('X Proper Motion (mas)')
    py.savefig(workDir + '20.KS2_PMA/vpd_ks2_all.png')

    idx = np.where((np.abs(dx) < 10) & (np.abs(dy) < 10))[0]
    print('Cluster Members (within dx < 10 mas and dy < 10 mas)')
    print('   dx = {dx:6.2f} +/- {dxe:6.2f} mas'.format(dx=dx[idx].mean(),
                                                        dxe=dx[idx].std()))
    print('   dy = {dy:6.2f} +/- {dye:6.2f} mas'.format(dy=dy[idx].mean(),
                                                        dye=dy[idx].std()))
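A minimal, self-contained sketch of the quiver/quiverkey pattern used above, with random stand-in data instead of the wd1 catalog:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x, y = rng.uniform(0, 100, (2, 50))   # stand-in star positions
dx, dy = rng.normal(0, 2, (2, 50))    # fake residuals in mas

plt.clf()
q = plt.quiver(x, y, dx, dy, scale=5e2)
plt.quiverkey(q, 0.95, 0.85, 5, '5 mas', color='red', labelcolor='red')
plt.savefig('vec_diffs_demo.png')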
    def plot_matches(self, name, show_below = True, match_maximum = None):
        """ 対応点を線で結んで画像を表示する
          入力: im1,im2(配列形式の画像)、locs1,locs2(特徴点座標)
             machescores(match()の出力)、
             show_below(対応の下に画像を表示するならTrue)"""
        im1 = self._image_1.get_array_image()
        im2 = self._image_2.get_array_image()
        self.appendimages()
        im3 = self._append_image
        if self._match_score is None:
            self.match()
        locs1 = self._image_1.get_shift_location()
        locs2 = self._image_2.get_shift_location()
        if show_below:
            im3 = numpy.vstack((im3,im3))
        pylab.figure(dpi=160)
        pylab.gray()
        pylab.imshow(im3, aspect = 'auto')

        cols1 = im1.shape[1]
        match_num = 0
        for i, m in enumerate(self._match_score):
            if m > 0:
                m = int(m)  # the match score stores the index of the matching point in locs2
                pylab.plot([locs1[i][0], locs2[m][0] + cols1], [locs1[i][1], locs2[m][1]], 'c')
                match_num = match_num + 1
            if match_maximum is not None and match_num >= match_maximum:
                break
        pylab.axis('off')
        pylab.savefig(name, dpi=160)
    def create_pie_chart(self, snapshot, filename=''):
        """
        Create a pie chart that depicts the distribution of the allocated
        memory for a given `snapshot`. The chart is saved to `filename`.
        """
        try:
            from pylab import figure, title, pie, axes, savefig
            from pylab import sum as pylab_sum
        except ImportError:
            return self.nopylab_msg % ("pie_chart")

        # Don't bother illustrating a pie without pieces.
        if not snapshot.tracked_total:
            return ''

        classlist = []
        sizelist = []
        for k, v in list(snapshot.classes.items()):
            if v['pct'] > 3.0:
                classlist.append(k)
                sizelist.append(v['sum'])
        sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
        classlist.insert(0, 'Other')
        #sizelist = [x*0.01 for x in sizelist]

        title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
        figure(figsize=(8, 8))
        axes([0.1, 0.1, 0.8, 0.8])
        pie(sizelist, labels=classlist)
        savefig(filename, dpi=50)

        return self.chart_tag % (self.relative_path(filename))
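The >3% cut above is what keeps the pie legible; everything smaller is folded into 'Other'. A standalone sketch of that bookkeeping, with a made-up class-size table in place of a pympler snapshot:

classes = {'Foo': {'pct': 45.0, 'sum': 4500},
           'Bar': {'pct': 2.0, 'sum': 200},
           'Baz': {'pct': 53.0, 'sum': 5300}}
asizeof_total = 10000

classlist = [k for k, v in classes.items() if v['pct'] > 3.0]
sizelist = [classes[k]['sum'] for k in classlist]
sizelist.insert(0, asizeof_total - sum(sizelist))
classlist.insert(0, 'Other')
print(classlist, sizelist)  # ['Other', 'Foo', 'Baz'] [200, 4500, 5300]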
Example #5
def compareAnimals(animals, precision):
    """Assumes animals is a list of animals, precision an int >= 0
       Builds a table of Euclidean distance between each animal"""
    #Get labels for columns and rows
    columnLabels = []
    for a in animals:
        columnLabels.append(a.getName())
    rowLabels = columnLabels[:]
    tableVals = []
    #Get distances between pairs of animals
    #For each row
    for a1 in animals:
        row = []
        #For each column
        for a2 in animals:
            if a1 == a2:
                row.append('--')
            else:
                distance = a1.distance(a2)
                row.append(str(round(distance, precision)))
        tableVals.append(row)
    #Produce table
    table = pylab.table(rowLabels = rowLabels,
                        colLabels = columnLabels,
                        cellText = tableVals,
                        cellLoc = 'center',
                        loc = 'center',
                        colWidths = [0.2]*len(animals))
    table.scale(1, 2.5)
    pylab.axis('off') #Don't display x and y-axes
    pylab.savefig('distances')
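For reference, a minimal pylab.table call in the same shape as above, with two hypothetical animals and hand-filled distances:

import pylab

pylab.figure()
table = pylab.table(rowLabels=['cat', 'dog'], colLabels=['cat', 'dog'],
                    cellText=[['--', '1.41'], ['1.41', '--']],
                    cellLoc='center', loc='center', colWidths=[0.2, 0.2])
table.scale(1, 2.5)
pylab.axis('off')
pylab.savefig('distances_demo')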
Example #6
def trace(data, name, format='png', datarange=(None, None), suffix='', path='./', rows=1, columns=1, 
    num=1, last=True, fontmap = None, verbose=1):
    """
    Generates trace plot from an array of data.

    :Arguments:
        data: array or list
            Usually a trace from an MCMC sample.

        name: string
            The name of the trace.
            
        datarange: tuple or list
            Preferred y-range of trace (defaults to (None,None)).

        format (optional): string
            Graphic output format (defaults to png).

        suffix (optional): string
            Filename suffix.

        path (optional): string
            Specifies location for saving plots (defaults to local directory).
            
        fontmap (optional): dict
            Font map for plot.

    """

    if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}

    # Stand-alone plot or subplot?
    standalone = rows==1 and columns==1 and num==1

    if standalone:
        if verbose>0:
            print_('Plotting', name)
        figure()

    subplot(rows, columns, num)
    pyplot(data.tolist())  # pyplot = pylab's plot, aliased at module level (PyMC Matplot convention)
    ylim(datarange)

    # Plot options
    title('\n\n   %s trace'%name, x=0., y=1., ha='left', va='top', fontsize='small')

    # Smaller tick labels
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[max(rows // 2, 1)])  # integer-divide and clamp so the key exists

    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[max(rows // 2, 1)])

    if standalone:
        if not os.path.exists(path):
            os.mkdir(path)
        if not path.endswith('/'):
            path += '/'
        # Save to file
        savefig("%s%s%s.%s" % (path, name, suffix, format))
Example #7
def geweke_plot(data, name, format='png', suffix='-diagnostic', path='./', fontmap = None, 
    verbose=1):
    """Generate Geweke (1992) diagnostic plots."""

    if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}

    # Generate new scatter plot
    figure()
    x, y = transpose(data)
    scatter(x.tolist(), y.tolist())

    # Plot options
    xlabel('First iteration', fontsize='x-small')
    ylabel('Z-score for %s' % name, fontsize='x-small')

    # Plot lines at +/- 2 sd from zero
    pyplot((nmin(x), nmax(x)), (2, 2), '--')
    pyplot((nmin(x), nmax(x)), (-2, -2), '--')

    # Set plot bound
    ylim(min(-2.5, nmin(y)), max(2.5, nmax(y)))
    xlim(0, nmax(x))

    # Save to file
    if not os.path.exists(path):
        os.mkdir(path)
    if not path.endswith('/'):
        path += '/'
    savefig("%s%s%s.%s" % (path, name, suffix, format))
def plot_Barycenter(dataset_name, feat, unfeat, repo):

    if dataset_name==MNIST:
        _, _, test=get_data(dataset_name, repo, labels=True)
        xtest1,_,_, labels,_=test
    else:
        _, _, test=get_data(dataset_name, repo, labels=False)
        xtest1,_,_ =test
        labels=np.zeros((len(xtest1),))
    # get labels
    def bary_wdl2(index): return _bary_wdl2(index, xtest1, feat, unfeat)
    
    n=xtest1.shape[-1]
    
    num_class = int(max(labels) + 1)
    barys = [bary_wdl2(np.where(labels == i)) for i in range(num_class)]
    pl.figure(1, (num_class, 1))
    for i in range(num_class):
        pl.subplot(1, num_class, 1 + i)  # one column per class rather than a hard-coded 10
        pl.imshow(barys[i][0,0,:,:],cmap='Blues',interpolation='nearest')
        pl.xticks(())
        pl.yticks(())
        if i==0:
            pl.ylabel('DWE Bary.')
        if num_class >1:
            pl.title('{}'.format(i))
    pl.tight_layout(pad=0,h_pad=-2,w_pad=-2) 
    pl.savefig("imgs/{}_dwe_bary.pdf".format(dataset_name))
def plotKerasExperimentcifar10():

    index = 5
    for experiment_number in range(1,index+1):
        outputPath_part_final = os.path.realpath( "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/hyperopt_experiment_withoutparam_accuracy" + str(experiment_number) + ".txt")
        output_plot = os.path.realpath(
                "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/plotErrorCurve" + str(experiment_number) + ".pdf")

        df = pd.read_csv(outputPath_part_final,delimiter='\t',header=None)
        df.drop(df.columns[[600]], axis=1, inplace=True)

        epochnum = list(range(1, 251))
        for i in range(10):
            df_1 = df.iloc[i, 0:250]  # select row i of the first 250 columns by position
            plt.plot(epochnum, df_1)

        # plt.show()
        plt.savefig(output_plot)
        plt.close()
Example #10
 def aa(current_data):
     title_hours(current_data)
     if 0:
         from pylab import savefig
         fname = 'pacific%s.png' % str(current_data.frameno).zfill(4)
         savefig(fname)
         print('Saved', fname)
def simplegrid():

    nzones = 7

    gr = gpu.grid(nzones, xmin=0, xmax=1)

    gpu.drawGrid(gr, edgeTicks=0)

    # label a few cell-centers
    gpu.labelCenter(gr, nzones // 2, r"$i$")
    gpu.labelCenter(gr, nzones // 2 - 1, r"$i-1$")
    gpu.labelCenter(gr, nzones // 2 + 1, r"$i+1$")

    # label a few edges
    gpu.labelEdge(gr, nzones // 2, r"$i-1/2$")
    gpu.labelEdge(gr, nzones // 2 + 1, r"$i+1/2$")


    # draw an average quantity
    gpu.drawCellAvg(gr, nzones/2, 0.4, color="r")
    gpu.labelCellAvg(gr, nzones/2, 0.4, r"$\,\langle a \rangle_i$", color="r")

    pylab.axis([gr.xmin-1.5*gr.dx,gr.xmax+1.5*gr.dx, -0.25, 1.5])
    pylab.axis("off")

    pylab.subplots_adjust(left=0.05,right=0.95,bottom=0.05,top=0.95)

    f = pylab.gcf()
    f.set_size_inches(10.0,2.5)


    pylab.savefig("simplegrid2.png")
    pylab.savefig("simplegrid2.eps")
Example #12
def makeContourPlot(scores, average, HEIGHT, WIDTH, outputId, maskId, plt_title, outputdir, barcodeId=-1, vmaxVal=100):
    pylab.bone()
    #majorFormatter = FormatStrFormatter('%.f %%')
    #ax = pylab.gca()
    #ax.xaxis.set_major_formatter(majorFormatter)
    
    pylab.figure()
    ax = pylab.gca()
    ax.set_xlabel(str(WIDTH) + ' wells')
    ax.set_ylabel(str(HEIGHT) + ' wells')
    ax.autoscale_view()
    pylab.jet()
    
    pylab.imshow(scores, vmin=0, vmax=vmaxVal, origin='lower')
    ticksVal = getTicksForMaxVal(vmaxVal)
    pylab.colorbar(format='%.0f %%', ticks=ticksVal)
    print("'%s'" % average)
    if barcodeId != -1:
        if barcodeId == 0: maskId = "No Barcode Match,"
        else:              maskId = "Barcode Id %d," % barcodeId
    if plt_title != '': maskId = '%s\n%s' % (plt_title, maskId)
    print("Checkpoint A")
    pylab.title('%s Loading Density (Avg ~ %0.f%%)' % (maskId, average))
    pylab.axis('scaled')
    print("Checkpoint B")
    pngFn = outputdir + '/' + outputId + '_density_contour.png'
    print("Try save to", pngFn)
    pylab.savefig(pngFn, bbox_inches='tight')
    print("Plot saved to", pngFn)
Example #13
def plotEventFlop(library, num, eventNames, sizes, times, events, filename = None):
  from pylab import legend, plot, savefig, semilogy, show, title, xlabel, ylabel
  import numpy as np

  arches = list(sizes.keys())
  bs     = list(events[arches[0]].keys())[0]
  data   = []
  names  = []
  for event, color in zip(eventNames, ['b', 'g', 'r', 'y']):
    for arch, style in zip(arches, ['-', ':']):
      if event in events[arch][bs]:
        names.append(arch+'-'+str(bs)+' '+event)
        data.append(sizes[arch][bs])
        data.append(1e-3*np.array(events[arch][bs][event])[:,1])
        data.append(color+style)
      else:
        print('Could not find %s in %s-%d events' % (event, arch, bs))
  semilogy(*data)
  title('Performance on '+library+' Example '+str(num))
  xlabel('Number of Dof')
  ylabel('Computation Rate (GF/s)')
  legend(names, loc='upper left', shadow=True)
  if filename is None:
    show()
  else:
    savefig(filename)
  return
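A hedged sketch of the nested inputs plotEventFlop expects: sizes[arch][bs] lists problem sizes and events[arch][bs][event] lists (count, flops) pairs; all the numbers below are made up:

sizes  = {'cpu': {32: [1e3, 1e4, 1e5]}}
times  = {}   # unused by the plotting path above
events = {'cpu': {32: {'MatMult': [(1, 2.0e3), (1, 1.9e4), (1, 2.1e5)]}}}

plotEventFlop('PETSc', 1, ['MatMult'], sizes, times, events, filename='flops.png')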
Example #14
def create_figure():
    psd = test_correlog()
    f = linspace(-0.5, 0.5, len(psd))

    psd = cshift(psd, len(psd)//2)
    plot(f, 10*log10(psd/max(psd)))
    savefig('psd_corr.png')
def plot_sphere_x( s, fname ):
  """ put plot of ionization fractions from sphere `s` into fname """

  plt.figure()
  s.Edges.units = 'kpc'
  s.r_c.units = 'kpc'
  xx = s.r_c
  L = s.Edges[-1]

  plt.plot( xx, np.log10( s.xHe1 ),
            color='green', ls='-', label = r'$x_{\rm HeI}$' )
  plt.plot( xx, np.log10( s.xHe2 ),
            color='green', ls='--', label = r'$x_{\rm HeII}$' )
  plt.plot( xx, np.log10( s.xHe3 ),
            color='green', ls=':', label = r'$x_{\rm HeIII}$' )

  plt.plot( xx, np.log10( s.xH1 ),
            color='red', ls='-', label = r'$x_{\rm HI}$' )
  plt.plot( xx, np.log10( s.xH2 ),
            color='red', ls='--', label = r'$x_{\rm HII}$' )

  plt.xlim( -L/20, L+L/20 )
  plt.xlabel( 'r_c [kpc]' )

  plt.ylim( -4.5, 0.2 )
  plt.ylabel( 'log 10 ( x )' )

  plt.grid()
  plt.legend(loc='best', ncol=2)
  plt.tight_layout()
  plt.savefig( 'doc/img/x_' + fname )
def plot_heatingrate(data_dict, filename, do_show=True):
    pl.figure(201)
    color_list = ['b','r','g','k','y','r','g','b','k','y','r',]
    fmtlist = ['s','d','o','s','d','o','s','d','o','s','d','o']
    result_dict = {}
    for key in data_dict.keys():
        x = data_dict[key][0]
        y = data_dict[key][1][:,0]
        y_err = data_dict[key][1][:,1]

        p0 = np.polyfit(x,y,1)
        fit = LinFit(np.array([x,y,y_err]).transpose(), show_graph=False)
        p1 = [0,0]
        p1[0] = fit.param_dict[0]['Slope'][0]
        p1[1] = fit.param_dict[0]['Offset'][0]
        print(fit)
        x0 = np.linspace(0,max(x))
        cstr = color_list.pop(0)
        fstr = fmtlist.pop(0)
        lstr = key + " heating: {0:.2f} ph/ms".format((p1[0]*1e3)) 
        pl.errorbar(x/1e3,y,y_err,fmt=fstr + cstr,label=lstr)
        pl.plot(x0/1e3,np.polyval(p0,x0),cstr)
        pl.plot(x0/1e3,np.polyval(p1,x0),cstr)
        result_dict[key] = 1e3*np.array(fit.param_dict[0]['Slope'])
    pl.xlabel('Heating time (ms)')
    pl.ylabel('nbar')
    if do_show:
        pl.legend()
        pl.show()
    if filename is not None:
        pl.savefig(filename)
    return result_dict
Example #17
def Doplots_monthly(mypathforResults,PlottingDF,variable_to_fill, Site_ID,units,item):   
    ANN_label=str(item+"_NN")     #Do Monthly Plots
    print "Doing MOnthly  plot"
    #t = arange(1, 54, 1)
    NN_label='Fc'
    Plottemp = PlottingDF[[NN_label,item]][PlottingDF['day_night']!=1]
    #Plottemp = PlottingDF[[NN_label,item]].dropna(how='any')
    figure(1)
    pl.title('Nightime ANN v Tower by year-month for '+item+' at '+Site_ID)

    try:
	xdata1a=Plottemp[item].groupby([lambda x: x.year,lambda x: x.month]).mean()
	plotxdata1a=True
    except:
	plotxdata1a=False
    try:
	xdata1b=Plottemp[NN_label].groupby([lambda x: x.year,lambda x: x.month]).mean()
	plotxdata1b=True
    except:
	plotxdata1b=False 
    if plotxdata1a==True:
	pl.plot(xdata1a,'r',label=item) 
    if plotxdata1b==True:
	pl.plot(xdata1b,'b',label=NN_label)
    pl.ylabel('Flux')    
    pl.xlabel('Year - Month')       
    pl.legend()
    pl.savefig(mypathforResults+'/ANN and Tower plots by year and month for variable '+item+' at '+Site_ID)
    #pl.show()
    pl.close()
    time.sleep(1)
Example #18
    def swi_histogram(self,dir,name,measure,dpi=80,width=8,height=6,b_left='0.1',b_bot='0.1',b_top='0.1',b_right='0.1',bins='20'):
        s = ccm.stats.Stats('%s/%s' % (dir, name))
        data = s.get_raw(measure)

        bins = int(bins)

        pylab.figure(figsize=(float(width), float(height)))
        try: b_left = float(b_left)
        except ValueError: b_left = 0.1
        try: b_right = float(b_right)
        except ValueError: b_right = 0.1
        try: b_top = float(b_top)
        except ValueError: b_top = 0.1
        try: b_bot = float(b_bot)
        except ValueError: b_bot = 0.1
        pylab.axes((b_left, b_bot, 1.0 - b_left - b_right, 1.0 - b_top - b_bot))

        pylab.hist(data, bins=bins)

        # assumes `import io` at module level; savefig needs a binary buffer on Python 3
        img = io.BytesIO()
        if type(dpi) is list: dpi = dpi[-1]
        pylab.savefig(img, dpi=int(dpi), format='png')
        return 'image/png', img.getvalue()
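A hypothetical call (the object, directory, and measure names are invented): the method returns a MIME type plus raw PNG bytes, ready to hand to an HTTP response or write to disk:

mimetype, png_bytes = stats_page.swi_histogram('stats', 'run1', 'latency',
                                               dpi=100, bins='30')
with open('latency.png', 'wb') as out:
    out.write(png_bytes)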
Example #19
 def testTelescope(self):
     import matplotlib
     matplotlib.use('AGG')
     import matplotlib.mlab as ml
     import pylab as pl
     import time        
     w0 = 8.0
     k = 2*np.pi/3.0
     gb = GaussianBeam(w0, k)
     lens = ThinLens(150, 150)
     gb2 = lens*gb
     self.assertAlmostEqual(gb2._z0, gb._z0 + 2*150.0)
     lens2 = ThinLens(300, 600)
     gb3 = lens2*gb2
     self.assertAlmostEqual(gb3._z0, gb2._z0 + 2*300.0)
     self.assertAlmostEqual(gb._w0, gb3._w0/2.0)
     z = np.arange(0, 150)
     z2 = np.arange(150, 600)
     z3 = np.arange(600, 900)
     pl.plot(z, gb.w(z, k), z2, gb2.w(z2, k), z3, gb3.w(z3, k))
     pl.grid()
     pl.xlabel('z')
     pl.ylabel('w')
     pl.savefig('testTelescope1.png')
     time.sleep(0.1)
     pl.close('all')        
Example #20
def plotslice(pos,filename='',boxsize=100.):
    ng = pos.shape[0]
    M.clf()
    M.scatter(pos[ng//4,:,:,1].flatten(),pos[ng//4,:,:,2].flatten(),s=1.,lw=0.)
    M.axis('tight')
    if filename != '':
        M.savefig(filename)
def window_fn_matrix(Q,N,num_remov=None,save_tag=None,lms=None):
    Q = n.matrix(Q); N = n.matrix(N)
    Ninv = uf.pseudo_inverse(N,num_remov=None) # XXX want to remove dynamically
    #print Ninv 
    info = n.dot(Q.H,n.dot(Ninv,Q))
    M = uf.pseudo_inverse(info,num_remov=num_remov)
    W = n.dot(M,info)

    if save_tag is not None:
        foo = W[0,:]
        foo = n.real(n.array(foo))
        foo.shape = (foo.shape[1]),
        print(foo.shape)
        p.scatter(lms[:,0],foo,c=lms[:,1],cmap=mpl.cm.PiYG,s=50)
        p.xlabel('l (color is m)')
        p.ylabel('W_0,lm')
        p.title('First Row of Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W.pdf'.format(fig_loc,save_tag))
        p.clf()

        print('W ', W.shape)
        p.imshow(n.real(W))
        p.title('Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W_im.pdf'.format(fig_loc,save_tag))
        p.clf()


    return W
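A toy check of the identity behind W = M @ info, with numpy's pinv standing in for uf.pseudo_inverse and random stand-ins for Q and N:

import numpy as np

Q = np.random.randn(6, 3)
N = np.eye(6)
info = Q.conj().T @ np.linalg.pinv(N) @ Q
W = np.linalg.pinv(info) @ info
print(np.allclose(W, np.eye(3)))  # True when info is full rank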
Example #22
def exercise_4_1():

    exp_t = np.load('exp_t.npy')
    exp_somav = np.load('exp_v.npy')
    exp_somav -=  exp_somav[0]
    exp_somav /= abs(exp_somav.max())

    soma_rall, dend_rall = return_ball_and_stick_soma()
    stim = insert_current_clamp(soma_rall(0.5))
    t, v_rall = run_simulation(soma_rall(0.5))
    v_rall -= v_rall[0]
    v_rall /= abs(v_rall.max())

    soma_ball = return_ball_soma()
    stim_ball = insert_current_clamp(soma_ball(0.5))
    t_ball, v_ball = run_simulation(soma_ball(0.5))
    v_ball -= v_ball[0]
    v_ball /= abs(v_ball.max())

    fig = plt.figure()
    ax1 = fig.add_subplot(111, xlabel="Time [ms]", ylabel="Voltage [mV]")
    ax1.plot(t, exp_somav, 'gray', label='"Experiment"')
    ax1.plot(t, v_rall, 'g', label='Rall')
    ax1.plot(t_ball, v_ball, 'b', label='ball')
    plt.legend(loc=4, frameon=False)

    plt.savefig('exercise_4_1_.png')
    plt.show()
Example #23
def manhattonPlot(phenotype_ID, pvalues_lm, ouFprefix, pos, chromBounds):
    for ip, p_ID in enumerate(phenotype_ID):
        pl.figure(figsize=[12,4])
        plot_manhattan(posCum=pos['pos_cum'],pv=pvalues_lm[p_ID].values,chromBounds=chromBounds,thr_plotting=0.05)
        pl.title(p_ID)
        pl.savefig(ouFprefix + '.' + p_ID + '.pdf')
        pl.close('all')
Example #24
def bar_plot_raw(inF):
    ouF = inF + '.pdf'
    X = []
    Y = []
    inFile = open(inF)
    for line in inFile:
        line = line.strip()
        fields = line.split('\t')
        X.append(int(fields[0]))
        #Y.append(math.log(int(fields[1])+1,2))
        Y.append(int(fields[1]))
    fig = pl.figure()
    N = len(X)
    ax = fig.add_subplot(111)
    ax.set_xlim(0,N + 1)
    ax.set_xticks(range(0,N+2))
    ax.set_xticklabels([0,1] + ['']*(N-1) + [max(X)])
    ax.bar(range(1,N+1), Y, align='center')
    ax.set_xlabel('Start position in the protein')
    ax.set_ylabel('Number of peptides')
    pl.setp(ax.get_xticklines(),visible=False)
    pl.setp(ax.get_xticklabels(),fontsize=6)
    ax.get_children()[2].set_color('g')
    ax.get_children()[3].set_color('r')
    pl.savefig(ouF)
    inFile.close()
Example #25
def SingleTraitLM(inF1, inF2, ouF):

    geno_reader = gr.genotype_reader_tables(inF1)
    pheno_reader = phr.pheno_reader_tables(inF2)
    dataset = data.QTLData(geno_reader=geno_reader,pheno_reader=pheno_reader)
    geno = dataset.getGenotypes()
    position = dataset.getPos()
    pos,chromBounds = data_util.estCumPos(position=position,offset=0)

    ouFile = open(ouF, 'w')

    P_max = len(dataset.phenotype_ID)
    phenotype_ID = dataset.phenotype_ID[0:P_max]


    for p_ID in phenotype_ID[0:]:
        #phenotype_vals, sample_idx = dataset.getPhenotypes([pI], center=False)
        phenotype_vals, sample_idx = dataset.getPhenotypes([p_ID])
        phenotype_vals_ranks = preprocess.rankStandardizeNormal(phenotype_vals.values)
        lm_ranks = qtl.test_lm(snps=geno[sample_idx],pheno=phenotype_vals_ranks)
        pvalues_lm_ranks = pd.DataFrame(data=lm_ranks.pvalues.T,index=dataset.geno_ID,columns=[p_ID])
        pvt = lm_ranks.pvalues.T
        for i in range(pvt.shape[0]):
            p = pvt[i,0]
            if p <= SIG:
                ouFile.write('\t'.join([position['chrom'][i], str(position['pos'][i]), str(p), p_ID]) + '\n')
    ouFile.close()

    manhattonPlot(['NMD'],pvalues_lm_ranks,inF2,pos, chromBounds)

    pl.figure(figsize=[12,4])
    qqplot(pvalues_lm_ranks['NMD'].values)
    pl.savefig('pvalues-qqplot.pdf')
Example #26
def makeimg(wav):
	global callpath
	global imgpath

	fs, frames = wavfile.read(os.path.join(callpath, wav))
	
	pylab.ion()

	pylab.figure(1)

	# generate spectrogram
	pylab.specgram(
		frames,
		NFFT=256, 
		Fs=22050, 
		detrend=pylab.detrend_none,
		window=numpy.hamming(256),
		noverlap=192,
		cmap=pylab.get_cmap('Greys'))
	
	x_width = len(frames)/fs
	
	pylab.ylim([0,11025])
	pylab.xlim([0,round(x_width,3)-0.006])
	
	img_path = os.path.join(imgpath, wav.replace(".wav",".png"))

	pylab.savefig(img_path)
	
	return img_path
Example #27
def density(ouF, bandwidth):
    AX = []
    df = pd.read_table('Mouse_Gene_Promoter_Cov_ProteinCoding-Norm', header=0)
    
    Sample = df.columns[4:]
    #Sample2 = Sample
    Sample2 = [' '.join(x.split('_')[0:-1]) for x in df.columns[4:]]
    
    fig = plt.figure()
    ax = fig.add_axes([0.15,0.15,0.8,0.8])
    
    for i in range(4, df.shape[1]):
        AX.append(sns.kdeplot(np.log2(df.iloc[:, i]), shade=True, color=LineColor(i-4), legend=True, label=GetLabel(i-4, Sample2), bw=bandwidth))
    '''
    patch1 = mpatches.Patch(color='r', label='Tspan8 negative MHCII low')
    patch2 = mpatches.Patch(color='b', label='Tspan8 negative MHCII high')
    patch3 = mpatches.Patch(color='g', label='Tspan8 positive MHCII low')
    patch4 = mpatches.Patch(color='m', label='Tspan8 positive MHCII high')
    plt.legend(handles=[patch1, patch2, patch3, patch4])
    '''
    ax.set_xlabel('Normalized number of reads (log2), bandwidth=%s'%bandwidth)
    ax.set_ylabel('Density of gene numbers')
    ax.set_xlim(0, ax.get_xlim()[1])
    
    plt.savefig(ouF +'-bw_'+ str(bandwidth) + '.pdf')
Example #28
def draw(inF):
    G = nx.Graph()

    inFile = open(inF)
    S = set()
    for line in inFile:
        line = line.strip()
        fields = line.split('\t')
        for item in fields:
            S.add(item)
    inFile.close()

    L = list(S)
    G.add_nodes_from(L)

    LC = []
    for x in L:
        if x == 'EGR1' or x == 'RBM20':
            LC.append('r')
        else:
            LC.append('w')

    inFile = open(inF)
    for line in inFile:
        line = line.strip()
        fields = line.split('\t')
        for i in range(len(fields)-1):
            G.add_edge(fields[i], fields[i+1])
    inFile.close()
    nx.draw_networkx(G,pos=nx.spring_layout(G), node_size=800, font_size=6, node_color=LC)
    limits=plt.axis('off')
    plt.savefig(inF + '.pdf')
Example #29
    def __call__(self, n):
        if len(self.f.shape) == 3:
            # f = f[x,v,t], 2 dim in phase space
            ft = self.f[n,:,:]
            pylab.pcolormesh(self.X, self.V, ft.T, cmap = 'jet')
            pylab.colorbar()
            pylab.clim(0,0.38) # for Landau test case
            pylab.grid()
            pylab.axis([self.xmin, self.xmax, self.ymin, self.ymax])
            pylab.xlabel('$x$', fontsize = 18)
            pylab.ylabel('$v$', fontsize = 18)
            pylab.title('$N_x$ = %d, $N_v$ = %d, $t$ = %2.1f' % (self.x.N, self.v.N, self.it*self.t.width))
            pylab.savefig(self.path + self.filename)
            pylab.clf()
            return None

        if len(self.f.shape) == 2:
            # f = f[x], 1 dim in phase space
            ft = self.f[n,:]
            pylab.plot(self.x.gridvalues,ft,'ob')
            pylab.grid()
            pylab.axis([self.xmin, self.xmax, self.ymin, self.ymax])
            pylab.xlabel('$x$', fontsize = 18)
            pylab.ylabel('$f(x)$', fontsize = 18)
            pylab.savefig(self.path + self.filename)
            return None
Example #30
 def img_create(self):
     '''
     Create the output jpg of the file.
     :return:
     '''
     pylab.imshow(self._dcm.pixel_array, cmap=pylab.cm.bone)
     pylab.savefig(self._str_outputImageFile)
Example #31
        cnt = cnt + 1
        pathname = scenrio + out_name + '/%03d.jpg' % cnt
        cv2.imwrite(pathname, image3)
        # cv2.imshow('result',image3)
        # cv2.waitKey(2)
    tmp_IOU = 0
    for i in range(len(IOU_set)):
        tmp_IOU = tmp_IOU + IOU_set[i]

    if len(IOU_set) != 1:
        average_iou = tmp_IOU / len(IOU_set)
    else:
        average_iou = tmp_IOU  # single frame: avoid NameError when appended below
    #     ss2=0
    #     for i in range(len(IOU_set)-1):
    #         ss2 = ss2+(IOU_set[i]-average_iou)*(IOU_set[i]-average_iou)
    #     ss2 = math.sqrt(ss2 / (len(IOU_set) - 1))
    print(IOU_set)

    pl.plot(np.arange(1, len(IOU_set) + 1, 1), IOU_set)
    pl.xlabel("Image number")
    pl.ylabel("IOU value")
    pl.title("ATR images IOU diagram")
    pl.savefig(scenrio + out_name + '/iou.jpg')
    pl.close()
    # pl.show()
    # IOU_set.append('avg:')
    IOU_set.append(average_iou)
    # IOU_set.append('ss2:')
    # IOU_set.append(ss2)
    np.savetxt(scenrio + out_name + '/iou.txt', IOU_set)
    print(average_iou)

gen_model = generator()
dis_model = discriminator()

serializers.load_npz('generator.model', gen_model)
serializers.load_npz('discriminator.model', dis_model)

gen_model.to_gpu()
dis_model.to_gpu()

gen_opt = set_optimizer(gen_model, 1e-4, 0.5)
dis_opt = set_optimizer(dis_model, 1e-4, 0.5)

z1 = xp.random.uniform(-1, 1, (1, 100), dtype=np.float32)
z2 = xp.random.uniform(-1, 1, (1, 100), dtype=np.float32)
inter = (z2 - z1) / 20.0

for i in range(21):
    pylab.rcParams['figure.figsize'] = (1.0, 1.0)
    pylab.clf()
    vec = z1 + i * inter
    z = Variable(vec)
    with chainer.using_config('train', False):
        x = gen_model(z)
    x = x.data.get()
    tmp = ((np.vectorize(clip_img)(x[0, :, :, :]) + 1) / 2).transpose(1, 2, 0)
    pylab.imshow(tmp)
    pylab.axis('off')
    pylab.savefig('%s/morphing_%d.png' % (image_vec_dir, i))
Example #33
    print(pred.cputime(3., 10., 50.))

    # This one won't work
    #pred(-999)

    # Extract the Run 1 metadata and evaluate the predicted CPU times
    run1meta = pd.read_csv(
        os.path.join(os.environ['TWINKLES_DIR'], 'data',
                     'run1_metadata_v6.csv'),
        usecols=['filter', 'moonalt', 'moonphase', 'cputime_fell'])

    filter = np.array(run1meta['filter'])
    moonalt = np.array(run1meta['moonalt'])
    moonphase = np.array(run1meta['moonphase'])
    actual = np.array(run1meta['cputime_fell'])

    predicted = np.zeros(filter.size, dtype=float)

    for i in range(filter.size):
        predicted[i] = pred.cputime(filter[i], moonalt[i], moonphase[i])

    plt.scatter(np.log10(actual), np.log10(predicted))
    plt.plot([4, 6.5], [4, 6.5])
    pylab.ylim([4, 6.5])
    pylab.xlim([4, 6.5])
    plt.xlabel('log10(Actual Fell CPU time, s)')
    plt.ylabel('log10(Predicted Fell CPU time, s)')
    plt.title('Run 1 CPU Times Predicted vs. Actual')
    pylab.savefig('predicted_vs_actual.png', bbox_inches='tight')
    plt.show()
Example #34
	def train(      self, x, x_valid,
					epochs, num_batches,
					print_every = 1,
					learning_rate = 3e-4,
					beta1 = 0.9,
					beta2 = 0.999,
					seed = 31415,
					stop_iter = 100,
					save_path = None,
					load_path = None,
					draw_img = 1    ):

		self.num_examples = x.shape[0]
		self.num_batches = num_batches

		assert self.num_examples % self.num_batches == 0, '#Examples % #Batches != 0'

		self.batch_size = self.num_examples // self.num_batches

		''' Session and Summary '''
		if save_path is None:
			self.save_path = 'checkpoints/model_VAE_{}-{}_{}.cpkt'.format(learning_rate,self.batch_size,time.time())
		else:
			self.save_path = save_path

		np.random.seed(seed)
		tf.set_random_seed(seed)

		with self.G.as_default():

			self.optimiser = tf.train.AdamOptimizer( learning_rate = learning_rate, beta1 = beta1, beta2 = beta2 )
			self.train_op = self.optimiser.minimize( self.cost )
			init = tf.initialize_all_variables()
			self._test_vars = None

		with self.session as sess:

			sess.run(init)
			if load_path == 'default': self.saver.restore( sess, self.save_path )
			elif load_path is not None: self.saver.restore( sess, load_path )

			training_cost = 0.
			best_eval_log_lik = - np.inf
			stop_counter = 0

			for epoch in range(epochs):

				''' Shuffle Data '''
				np.random.shuffle( x )

				''' Training '''

				for x_batch in utils.feed_numpy( self.batch_size, x ):

					training_result = sess.run( [self.train_op, self.cost],
											feed_dict = { self.x: x_batch } )

					training_cost = training_result[1]

				''' Evaluation '''

				stop_counter += 1

				if epoch % print_every == 0:

					test_vars = tf.get_collection(bookkeeper.GraphKeys.TEST_VARIABLES)
					if test_vars:
						if test_vars != self._test_vars:
							self._test_vars = list(test_vars)
							self._test_var_init_op = tf.initialize_variables(test_vars)
						self._test_var_init_op.run()


					eval_log_lik, x_recon_eval = \
						sess.run( [self.eval_log_lik, self.x_recon_eval],
									feed_dict = { self.x: x_valid } )

					if eval_log_lik > best_eval_log_lik:

						best_eval_log_lik = eval_log_lik
						self.saver.save( sess, self.save_path )
						stop_counter = 0

					utils.print_metrics( 	epoch+1,
											['Training', 'cost', training_cost],
											['Validation', 'log-likelihood', eval_log_lik] )

					if draw_img > 0 and epoch % draw_img == 0:

						import matplotlib
						matplotlib.use('Agg')
						import pylab
						import seaborn as sns

						five_random = np.random.randint(0, x_valid.shape[0], size=5)  # high end exclusive, so indices stay in range
						x_sample = x_valid[five_random]
						x_recon_sample = x_recon_eval[five_random]

						sns.set_style('white')
						f, axes = pylab.subplots(5, 2, figsize=(8,12))
						for i,row in enumerate(axes):

							row[0].imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1)
							im = row[1].imshow(x_recon_sample[i].reshape(28, 28), vmin=0, vmax=1,
								cmap=sns.light_palette((1.0, 0.4980, 0.0549), input="rgb", as_cmap=True))

							pylab.setp([a.get_xticklabels() for a in row], visible=False)
							pylab.setp([a.get_yticklabels() for a in row], visible=False)

						f.subplots_adjust(left=0.0, right=0.9, bottom=0.0, top=1.0)
						cbar_ax = f.add_axes([0.9, 0.1, 0.04, 0.8])
						f.colorbar(im, cax=cbar_ax, use_gridspec=True)

						pylab.tight_layout()
						pylab.savefig('img/recon-'+str(epoch)+'.png', format='png')
						pylab.clf()
						pylab.close('all')

				if stop_counter >= stop_iter:
					print('Stopping VAE training')
					print('No change in validation log-likelihood for {} iterations'.format(stop_iter))
					print('Best validation log-likelihood: {}'.format(best_eval_log_lik))
					print('Model saved in {}'.format(self.save_path))
					break
Example #35
        sys.exit(1)
else:
    labels = None

for fi in range(0, len(filenames)):
    f = filenames[fi]
    process_file(f)
    for i in range(0, len(x)):
        if first_only[i] and fi != 0:
            x[i] = []
            y[i] = []
    if labels:
        lab = labels[fi * len(fields):(fi + 1) * len(fields)]
    else:
        lab = fields[:]
    if args.multi:
        col = colors[:]
    else:
        col = colors[fi * len(fields):]
    plotit(x, y, lab, colors=col)
    for i in range(0, len(x)):
        x[i] = []
        y[i] = []
if args.output is None:
    pylab.show()
    pylab.draw()
    input('press enter to exit....')
else:
    pylab.legend(loc=2, prop={'size': 8})
    pylab.savefig(args.output, bbox_inches='tight', dpi=200)
Example #36
# don't allow overwriting of these, just in case
try:
    x_profiles
except NameError:
    x_profiles = []
    z_profiles = []
    S_profiles = []
    A_profiles = []

# plot init conds
if make_output_plots:
    mg = fr.route_flow(grid=mg)
    pylab.figure('long_profile_anim')
    ylim([0, y_max])
    prf.analyze_channel_network_and_plot(mg)
    savefig('0profile_anim_init.png')
    close('long_profile_anim')

(profile_IDs, dists_upstr) = prf.analyze_channel_network_and_plot(mg)
start_node = [profile_IDs[0]]

time_on = time()
#perform the loops:
for i in range(nt):
    #print 'loop ', i
    mg.at_node['topographic__elevation'][mg.core_nodes] += uplift_per_step
    mg = fr.route_flow()
    #mg.calculate_gradient_across_cell_faces(mg.at_node['topographic__elevation'])
    #neighbor_slopes = mg.calculate_gradient_along_node_links(mg.at_node['topographic__elevation'])
    #mean_slope = np.mean(np.fabs(neighbor_slopes),axis=1)
    #max_slope = np.max(np.fabs(neighbor_slopes),axis=1)
Example #37
def feature_extraction():
    """
    This method restores a TensorFlow checkpoint file (.ckpt) and rebuilds inference
    model with restored parameters. From then on you can basically use that model in
    any way you want, for instance, feature extraction, finetuning or as a submodule
    of a larger architecture. However, this method should extract features from a
    specified layer and store them in data files such as '.h5', '.npy'/'.npz'
    depending on your preference. You will use those files later in the assignment.

    Args:
        [optional]
    Returns:
        None
    """

    ########################
    # PUT YOUR CODE HERE  #
    ########################
    #model = convnet.ConvNet(n_classes=10)

    #x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    #y = tf.placeholder(tf.float32, [None, 10])

    #logits = model.inference(x)

    #accuracy = model.accuracy(logits, y)

    #init = tf.initialize_all_variables()

    #saver = tf.train.Saver()

    with tf.Session() as sess:
        #saver.restore(sess, "checkpoints/convnet")

        cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
        x_test, y_test = cifar10.test.images, cifar10.test.labels

        #acc, fc2_out, fc1_out, flatten = sess.run([accuracy, model.fc2_out, model.fc1_out, model.flatten], feed_dict={x: x_test, y: y_test})
        #print('Accuracy: ' + str(acc))

        tsne = manifold.TSNE(n_components=2, random_state=0)
        #fc2_tsne = tsne.fit_transform(np.squeeze(fc2_out))
        #fc1_tsne = tsne.fit_transform(np.squeeze(fc1_out))
        #flatten_tsne = tsne.fit_transform(np.squeeze(flatten))

        #fc2_tsne = np.load('fc2_tsne')
        #fc1_tsne = np.load('fc1_tsne')
        #flatten_tsne = np.load('flatten_tsne')
        #labels = np.argmax(y_test, axis=1)

        #plt.figure(figsize=(25, 20))  #in inches
        #x = fc2_tsne[:,0]/np.linalg.norm(fc2_tsne[:,0])
        #y = fc2_tsne[:,1]/np.linalg.norm(fc2_tsne[:,1])
        #plt.scatter(x, y, c=labels)
        #plt.colorbar()
        #plt.savefig('fc2_tsne_norm.png')

        #plt.figure(figsize=(25, 20))
        #x = fc1_tsne[:,0]/np.linalg.norm(fc1_tsne[:,0])
        #y = fc1_tsne[:,1]/np.linalg.norm(fc1_tsne[:,1])
        #plt.scatter(x, y, c=labels)
        #plt.colorbar()
        #plt.savefig('fc1_tsne_norm.png')

        #plt.figure(figsize=(25, 20))  #in inches
        #x = flatten_tsne[:,0]/np.linalg.norm(flatten_tsne[:,0])
        #y = flatten_tsne[:,1]/np.linalg.norm(flatten_tsne[:,1])
        #plt.scatter(x, y, c=labels)
        #plt.colorbar()
        #plt.savefig('flatten_tsne_norm.png')

        #fc2_tsne.dump('fc2_tsne')
        #fc1_tsne.dump('fc1_tsne')
        #flatten_tsne.dump('flatten_tsne')
        #print('fc1 scores: ')
        #_classify(fc1_tsne, labels)
        #print('fc2 scores: ')
        #_classify(fc2_tsne, labels)
        #print('flatten layer scores: ')
        #_classify(flatten_tsne, labels)


    model = siamese.Siamese()
    x1 = tf.placeholder(tf.float32, [None, 32, 32, 3])

    model.inference(x1, reuse=False)

    with tf.Session() as siamese_sess:
        init = tf.initialize_all_variables()

        saver = tf.train.Saver()
        saver.restore(siamese_sess, "checkpoints/siamese")


        dataset = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
        x_test, y_test = cifar10.test.images, cifar10.test.labels

        
        l2_out = siamese_sess.run(model.l2_out, feed_dict={x1: x_test})

        siamese_tsne = tsne.fit_transform(np.squeeze(l2_out))
        plt.figure(figsize=(25, 20))  #in inches
        labels = np.argmax(y_test, axis=1)

        x = siamese_tsne[:,0]/np.linalg.norm(siamese_tsne[:,0])
        y = siamese_tsne[:,1]/np.linalg.norm(siamese_tsne[:,1])
        plt.scatter(x, y, c=labels)
        plt.colorbar()
        plt.savefig('siamese_l2out.png')

        siamese_tsne.dump('siamese_tsne')
        print('siamese L2 scores: ')
        _classify(siamese_tsne, labels)
Example #38
import pickle
import pylab as plt

with open("data.pickle", "rb") as fr:
    data = pickle.load(fr)

n = len(data)

# Use latex
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

# Plot
plt.figure(figsize=(10, 6), dpi=300)
plt.title(r"Title", fontsize=16)
plt.xlabel(r'$x$', fontsize=14)
plt.ylabel(r'$y$', fontsize=14)
plt.grid()

x = data[0]
for i in range(1, n):
    plt.plot(x, data[i], label=r'$i=' + str(i) + '$')
plt.legend(fontsize=12)  # legend after plotting so the labels are picked up

plt.savefig("plot.png", dpi=300)
Example #39
def make_histogram_predef(filename,
                          columns,
                          bin_ranges,
                          bin_numbers,
                          file_lineskip=1,
                          titles=None,
                          xlabels=None,
                          savefig=False):
    """Read data from 'filename' in columns 'columns' and create a histogram from the result.

    This method reads the data and immediately bins the data to produce a histogram, without saving
    the data in memory. Uses bin_ranges and bin_numbers to create histogram bins.
    Can optionally subsample the file, reading only every file_lineskip line.
    Returns the histogram binned data in a dictionary of numpy arrays, so new plots could be created."""
    # filename = name of data file.
    # columns = column numbers to read from data file. Count starts at 1. List of ints.
    # bin_ranges = must be specified. gives lower/upper ranges for the histogram.
    #              List of pairs of values, same length as columns.
    # bin_numbers = must be specified. gives number of bins in histogram.
    #               gives number of bins in histogram. List of ints, same length as columns.
    # file_lineskip = read only lines which are multiples of file_lineskip.
    # Check input types.
    for c in columns:
        if not (isinstance(c, int)):
            raise Exception('Error - columns should be list of ints.')
    if bin_numbers is None or bin_ranges is None:
        raise Exception(
            'Error - bin_ranges and bin_numbers have to be predefined.')
    if ((len(bin_numbers) != len(columns)) & (len(bin_numbers) > 1)):
        raise Exception(
            'Error - bin_numbers should be same length as columns or length 1.'
        )
    if len(bin_ranges) != len(columns):
        raise Exception(
            'Error - bin_ranges should be same length as columns. (2 pairs per list item).'
        )
    # Set up histogram bins and values to help assign values into histogram bins.
    histobins = {}
    histovals = {}
    minval = {}
    stepval = {}
    nbinmax = {}
    i = 0
    for c in columns:
        minval[c] = bin_ranges[i][0]
        hival = bin_ranges[i][1]
        stepval[c] = (hival - minval[c]) / float(bin_numbers[i])
        histobins[c] = numpy.arange(minval[c],
                                    hival + stepval[c],
                                    stepval[c],
                                    dtype='float')
        histovals[c] = numpy.zeros(len(histobins[c]), dtype='int')
        nbinmax[c] = len(histobins[c])
        i = i + 1
    # Set up for calculating some basic statistics for output.
    stats = {}
    statlist = ('data_min', 'data_max', 'data_ave', 'hist_min', 'hist_max',
                'hist_ave')
    for c in columns:
        stats[c] = {}
        stats[c]['data_min'] = 1e9
        stats[c]['data_max'] = -1e9
        stats[c]['data_ave'] = 0
    # Read the data.
    # Open data file.
    f = open(filename, 'r')
    line_num = 0
    for line in f:
        line_num = line_num + 1
        # Skip comment lines.
        if (line.startswith("#") | line.startswith("!")):
            continue
        # Skip the lines which are not multiples of file_lineskip (to enable subsampling file).
        #  i.e. if file_lineskip = 10, this will read only every tenth line.
        if (line_num % file_lineskip != 0):
            continue
        values = line.split()
        # If there are not enough values in the line, quit reading (assume end of file).
        if len(values) < max(columns):
            break
        # Assign data to histogram bins.
        for c in columns:
            dataval = float(values[c - 1])
            histidx = min(int((dataval - minval[c]) / stepval[c]),
                          nbinmax[c] - 1)
            histovals[c][histidx] = histovals[c][histidx] + 1
            # And calculate the min/max/ave of the data.
            stats[c]['data_min'] = min(dataval, stats[c]['data_min'])
            stats[c]['data_max'] = max(dataval, stats[c]['data_max'])
            stats[c]['data_ave'] = stats[c]['data_ave'] + dataval
    # Close file.
    f.close()
    # Print some basic output.
    for c in columns:
        print("# For column ", c, " used ", len(histovals[c]), " bins.")
        print("# And ", histovals[c].sum(), " values from the data file.")
        stats[c]['data_ave'] = stats[c]['data_ave'] / histovals[c].sum()
        stats[c]['hist_min'] = histovals[c].min()
        stats[c]['hist_max'] = histovals[c].max()
        stats[c]['hist_ave'] = histovals[c].sum() / float(len(histovals[c]))
    print("")
    writestring = "# column "
    for key in statlist:
        writestring = writestring + " %s " % (key)
    print(writestring)
    for c in columns:
        writestring = "c %d " % (c)
        for key in statlist:
            writestring = writestring + "%g " % (stats[c][key])
        print(writestring)
    # Make histogram plots.
    i = 0
    for c in columns:
        pylab.figure()
        pylab.bar(histobins[c], histovals[c], width=stepval[c], linewidth=0)
        if titles is None:
            pylab.title("%s Column %d" % (filename, c))
        else:
            pylab.title(titles[i])
        if xlabels is not None:
            pylab.xlabel(xlabels[i])
        if savefig:
            figname = "hist_%d" % (c)
            pylab.savefig(figname + "." + figformat, format=figformat)
        i = i + 1
    return histobins, histovals
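A quick cross-check of the streaming binning above using numpy.histogram on a small in-memory array (made-up data, same fixed-range convention):

import numpy

data = numpy.random.uniform(0.0, 10.0, 1000)
counts, edges = numpy.histogram(data, bins=20, range=(0.0, 10.0))
print(counts.sum())  # 1000: every value falls inside the fixed range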
Example #40
def main():
    global no_char

    x_train, y_train, x_test, y_test, no_char = data_read_words()

    # 2 cells
    x = tf.placeholder(tf.int64, [None, MAX_DOCUMENT_LENGTH])
    y_ = tf.placeholder(tf.int64)
    logits, word_list = rnn_model(x)

    # 1 cell
    x2 = tf.placeholder(tf.int64, [None, MAX_DOCUMENT_LENGTH])
    y2_ = tf.placeholder(tf.int64)
    logits2, word_list2 = rnn_model2(x2)

    #2 cells
    entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(
            y_, MAX_LABEL),
                                                   logits=logits))
    train_op = tf.train.AdamOptimizer(lr).minimize(entropy)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits, axis=1), y_), tf.float64))

    #1cell
    entropy2 = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(
            y2_, MAX_LABEL),
                                                   logits=logits2))
    train_op2 = tf.train.AdamOptimizer(lr).minimize(entropy2)
    accuracy2 = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits2, axis=1), y2_), tf.float64))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # 2 cells
        loss = []
        loss_batch = []
        acc = []

        # 1 cell
        loss2 = []
        loss_batch2 = []
        acc2 = []

        # breaking down into batches
        N = len(x_train)
        idx = np.arange(N)

        for e in range(no_epochs):
            np.random.shuffle(idx)
            trainX_batch, trainY_batch = x_train[idx], y_train[idx]

            for start, end in zip(range(0, N, batch_size),
                                  range(batch_size, N, batch_size)):
                word_list_, _, loss_ = sess.run([word_list, train_op, entropy],
                                                {
                                                    x: trainX_batch[start:end],
                                                    y_: trainY_batch[start:end]
                                                })
                loss_batch.append(loss_)

                word_list2_, _, loss2_ = sess.run(
                    [word_list2, train_op2, entropy2], {
                        x2: trainX_batch[start:end],
                        y2_: trainY_batch[start:end]
                    })
                loss_batch2.append(loss2_)

            #2cells
            loss.append(sum(loss_batch) / len(loss_batch))
            loss_batch[:] = []
            acc.append(accuracy.eval(feed_dict={x: x_test, y_: y_test}))

            #1cell
            loss2.append(sum(loss_batch2) / len(loss_batch2))
            loss_batch2[:] = []
            acc2.append(accuracy2.eval(feed_dict={x2: x_test, y2_: y_test}))

            if e % 10 == 0:
                print('2 Cells, epoch: %d, entropy: %g' % (e, loss[e]))
                print('2 Cells, epoch: %d, accuracy: %g' % (e, acc[e]))
                print('1 Cell, epoch: %d, entropy: %g' % (e, loss2[e]))
                print('1 Cell, epoch: %d, accuracy: %g' % (e, acc2[e]))

        pylab.figure(1)
        pylab.plot(range(len(loss)), loss)
        pylab.plot(range(len(loss2)), loss2)
        pylab.xlabel('epochs')
        pylab.ylabel('entropy')
        pylab.legend(['2 Cells', '1 Cell'])
        pylab.savefig('figures/partb_6b(3)_entropy_merged.png')

        pylab.figure(2)
        pylab.plot(range(len(acc)), acc)
        pylab.plot(range(len(acc2)), acc2)
        pylab.xlabel('epochs')
        pylab.ylabel('accuracy')
        pylab.legend(['2 Cells', '1 Cell'])
        pylab.savefig('figures/partb_6b(3)_accuracy_merged.png')

        pylab.show()
Example #41
def make_2d_scatterhist(filename,
                        columns,
                        bin_numbers=None,
                        file_lineskip=1,
                        titles=None,
                        xlabels=None,
                        savefig=False):
    """Read data from 'filename' in columns 'columns' and create a histogram from the result.

    This method reads the data and stores it in memory, to allow pylab to create the histogram
    with the optimal ranges for the data, while the number of bins can be specified with 'bin_numbers'.
    Can optionally subsample the file, reading only every file_lineskip line.
    Returns the data read from 'filename' in a dictionary of numpy arrays so if this was called from an
    interactive python shell, the histogram could be redone to visually examine the results of
    different ranges and number of bins. """
    # filename = name of data file.
    # columns = column numbers to read from data file. Count starts at 1. List of ints. Can only be 2 columns.
    # bin_numbers = can be specified (otherwise defaults to 100).
    #               gives number of bins in histogram. List of ints, same length as columns.
    # file_lineskip = read only lines which are multiples of file_lineskip.
    if len(columns) > 2:
        raise Exception('This routine can only handle 2 columns.')
    for c in columns:
        if not (isinstance(c, int)):
            raise Exception('Error - columns should be list of ints.')
    if bin_numbers is not None:
        if ((len(bin_numbers) != len(columns)) & (len(bin_numbers) > 1)):
            raise Exception(
                'Error - bin_numbers should be same length as columns or length 1.'
            )
    # Set up dictionary and lists to save data while being read.
    data = {}
    for c in columns:
        data[c] = []
    # Read the data.
    # Open data file.
    f = open(filename, 'r')
    line_num = 0
    for line in f:
        line_num = line_num + 1
        # Skip comment lines.
        if (line.startswith("#") | line.startswith("!")):
            continue
        # Skip the lines which are not multiples of file_lineskip (to enable subsampling file).
        #  i.e. if file_lineskip = 10, this will read only every tenth line.
        if ((line_num % file_lineskip) != 0):
            continue
        values = line.split()
        # If there are not enough values in the line, quit reading (assume end of file).
        if len(values) < max(columns):
            break
        # Assign data to dictionary.
        for c in columns:
            data[c].append(values[c - 1])
    # Close file.
    f.close()
    # Convert to numpy arrays.
    for c in columns:
        data[c] = numpy.array(data[c], dtype='float')
    # Set up to create histograms.
    # Set bin_numbers to default value, if not set by user.
    if bin_numbers is None:
        bin_numbers = [100]
    if len(bin_numbers) == 1:
        for c in columns:
            bin_numbers.append(bin_numbers[0])
    # Create scatter plot / histograms. One figure only.
    nullfmt = NullFormatter()  # no labels
    # definitions for the axes - set any values.
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]
    # start with a rectangular Figure
    pylab.figure(1, figsize=(8, 8))
    axScatter = pylab.axes(rect_scatter)
    axHistx = pylab.axes(rect_histx)
    axHisty = pylab.axes(rect_histy)
    # no labels
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)
    # Create the scatter plot.
    axScatter.scatter(data[columns[0]], data[columns[1]])
    # Now determine nice limits by hand.
    xymax = numpy.max(
        [numpy.max(data[columns[0]]),
         numpy.max(data[columns[1]])])
    xymin = numpy.min(
        [numpy.min(data[columns[0]]),
         numpy.min(data[columns[1]])])
    binwidth = (xymax - xymin) / bin_numbers[0]
    lim = (int(xymax / binwidth) + 0.5) * binwidth
    axScatter.set_xlim((-lim, lim))
    axScatter.set_ylim((-lim, lim))
    bins = numpy.arange(-lim, lim + binwidth, binwidth)
    n = {}
    n[columns[0]], b, p = axHistx.hist(data[columns[0]], bins=bins)
    n[columns[1]], b, p = axHisty.hist(data[columns[1]],
                                       bins=bins,
                                       orientation='horizontal')
    axHistx.set_xlim(axScatter.get_xlim())
    axHisty.set_ylim(axScatter.get_ylim())
    if xlabels is not None:
        pylab.xlabel(xlabels[0])
        pylab.ylabel(xlabels[1])
    if savefig:
        figname = "hist_%d" % (c)
        pylab.savefig(figname + "." + figformat, format=figformat)
    # Calculate some basic statistics for output.
    stats = {}
    statlist = ('data_min', 'data_max', 'data_ave', 'hist_min', 'hist_max',
                'hist_ave')
    for c in columns:
        stats[c] = {}
        stats[c]['data_min'] = data[c].min()
        stats[c]['data_max'] = data[c].max()
        stats[c]['data_ave'] = data[c].sum() / float(len(data[c]))
        stats[c]['hist_min'] = n[c].min()
        stats[c]['hist_max'] = n[c].max()
        stats[c]['hist_ave'] = n[c].sum() / float(len(n[c]))
    print("")
    writestring = "# column "
    for key in statlist:
        writestring = writestring + " %s " % (key)
    print(writestring)
    for c in columns:
        writestring = "c %d " % (c)
        for key in statlist:
            writestring = writestring + "%g " % (stats[c][key])
        print(writestring)
    return data, stats
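A hypothetical call (filename and labels invented): columns 1 and 2 of a whitespace-delimited file, subsampled 10x, plotted as a scatter with marginal histograms:

data, stats = make_2d_scatterhist('offsets.dat', [1, 2],
                                  bin_numbers=[50], file_lineskip=10,
                                  xlabels=['dx', 'dy'])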
Example #42
                            zorder=10)

        ts_map = copy.copy(blank)
        ts_map[pix] = ts[idx] if ts.ndim > 1 else ts

        stellar_map = copy.copy(blank)
        stellar_map[pix] = stellar[idx] if stellar.ndim > 1 else stellar
        ##frac_map = copy.copy(blank); frac_map[pix] = frac[idx]
        slices = [(ts_map, 'TS'), (stellar_map, 'Stellar'),
                  (maglim_map, 'Maglim (g)'), (density_map, 'Density')]
        for i, (map_, label) in enumerate(slices):  # avoid shadowing the builtin `map`

            img = healpy.gnomview(map_,
                                  rot=[glon, glat],
                                  xsize=xsize,
                                  reso=reso,
                                  title='',
                                  sub=[2, 2, i + 1],
                                  notext=True)
            fig.gca().annotate(label, **label_kwargs)

        fig.suptitle(title,
                     y=0.95,
                     bbox={
                         'boxstyle': "round",
                         'fc': '1'
                     },
                     zorder=10)
        plt.draw()
        plt.savefig(name + '_analysis.png', bbox_inches='tight')
#*****************************************************************************

import pylab

# Got this data using
# python sailing_uct.py "main(5)"
# and so on.

partial_run_20 = [
    107, 55366, 5577, 130710, 173, 597, 2254, 24113, 74626, 131, 196, 55926,
    433053, 147, 280688, 362, 296, 1003, 7, 519, 7736
]

estimate_20 = sum(partial_run_20) / (1.0 * len(partial_run_20)) * 100

lake_size = [0, 5, 10, 20]

grid_size = [x**2 for x in lake_size]
nr_samples = [0, 21743, 436847, estimate_20]

pylab.loglog(grid_size, nr_samples, marker='*')
pylab.title("Number of samples to achieve error < 0.1 for sailing problem")
pylab.xlabel("grid size")
pylab.ylabel("number of samples")
pylab.xlim(xmin=1)
pylab.ylim(ymin=1)
pylab.grid(True)

pylab.savefig("nr_samples_uct_sailing.pdf")
Example #44
def make_histogram(filename,
                   columns,
                   bin_numbers=None,
                   file_lineskip=1,
                   titles=None,
                   xlabels=None,
                   savefig=False):
    """Read data from 'filename' in columns 'columns' and create a histogram from the result.

    This method reads the data and stores it in memory, to allow pylab to create the histogram
    with the optimal ranges for the data, while the number of bins can be specified with 'bin_numbers'.
    Can optionally subsample the file, reading only every file_lineskip line.
    Returns the data read from 'filename' in a dictionary of numpy arrays so if this was called from an
    interactive python shell, the histogram could be redone to visually examine the results of
    different ranges and number of bins. """
    # filename = name of data file.
    # columns = column numbers to read from data file. Count starts at 1. List of ints.
    # bin_numbers = can be specified (otherwise defaults to 100).
    #               gives number of bins in histogram. List of ints, same length as columns.
    # file_lineskip = read only lines which are multiples of file_lineskip.
    for c in columns:
        if not isinstance(c, int):
            raise Exception('Error - columns should be list of ints.')
    if bin_numbers is not None:
        if (len(bin_numbers) != len(columns)) and (len(bin_numbers) > 1):
            raise Exception(
                'Error - bin_numbers should be same length as columns or length 1.'
            )
    # Set up dictionary and lists to save data while being read.
    data = {}
    for c in columns:
        data[c] = []
    # Read the data.
    # Open data file.
    f = open(filename, 'r')
    line_num = 0
    for line in f:
        line_num = line_num + 1
        # Skip comment lines.
        if (line.startswith("#") | line.startswith("!")):
            continue
        # Skip the lines which are not multiples of file_lineskip (to enable subsampling file).
        #  i.e. if file_lineskip = 10, this will read only every tenth line.
        if ((line_num % file_lineskip) != 0):
            continue
        values = line.split()
        # If there are not enough values in the line, quit reading (assume end of file).
        if len(values) < max(columns):
            break
        # Assign data to dictionary.
        for c in columns:
            data[c].append(values[c - 1])
    # Close file.
    f.close()
    # Convert to numpy arrays.
    for c in columns:
        data[c] = numpy.array(data[c], dtype='float')
    # Set up to create histograms.
    # Set bin_numbers to default value, if not set by user.
    if bin_numbers is None:
        bin_numbers = [100]
    if len(bin_numbers) == 1:
        bin_numbers = bin_numbers * len(columns)
    # Create histograms - a new figure for each column.
    i = 0
    n = {}
    b = {}
    p = {}
    for c in columns:
        pylab.figure()
        n[c], b[c], p[c] = pylab.hist(data[c], bins=bin_numbers[i])
        if titles is None:
            pylab.title("%s Column %d" % (filename, c))
        else:
            pylab.title(titles[i])
        if xlabels is not None:
            pylab.xlabel(xlabels[i])
        if savefig:
            # 'figformat' is assumed to be set at module level (e.g. "png").
            figname = "hist_%d" % (c)
            pylab.savefig(figname + "." + figformat, format=figformat)
        i = i + 1
    # Calculate some basic statistics for output.
    stats = {}
    statlist = ('data_min', 'data_max', 'data_ave', 'hist_min', 'hist_max',
                'hist_ave')
    for c in columns:
        stats[c] = {}
        stats[c]['data_min'] = data[c].min()
        stats[c]['data_max'] = data[c].max()
        stats[c]['data_ave'] = data[c].sum() / float(len(data[c]))
        stats[c]['hist_min'] = n[c].min()
        stats[c]['hist_max'] = n[c].max()
        stats[c]['hist_ave'] = n[c].sum() / float(len(n[c]))
    print("")
    writestring = "# column "
    for key in statlist:
        writestring = writestring + " %s " % (key)
    print(writestring)
    for c in columns:
        writestring = "c %d " % (c)
        for key in statlist:
            writestring = writestring + "%g " % (stats[c][key])
        print(writestring)
    return data, stats
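A usage sketch for make_histogram (the data file, columns, and labels are hypothetical; note that savefig=True additionally relies on a module-level 'figformat' setting in the original source):

data, stats = make_histogram('photometry.dat', columns=[1, 3],
                             bin_numbers=[50], file_lineskip=10,
                             xlabels=['magnitude', 'color'], savefig=False)
print(stats[1]['data_min'], stats[1]['data_max'])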
Example #45
celltypes = ['pyr', 'pv']
sub_fig_num = len(layers)

pylab.figure(1)
for xi, xin in enumerate(layers):
    pylab.subplot(sub_fig_num, 1, xi + 1)
    if xi == 0:
        pylab.title('V1_1')
    for yin in celltypes:
        spks = spikes['V1_1' + xin + yin]
        pylab.scatter(spks[0], spks[1], c=colors[yin], s=5, edgecolors='none')
        pylab.xlim([0.0, simtime])
    pylab.ylabel(xin)

pylab.savefig('../figs/raster_pref' + str(top_down_pyr) + '_' +
              str(top_down_pv) + '_' + str(msd) + '_' + str(fraction) + '_' +
              sim_len + '.eps')

pylab.figure(2)

for xi, xin in enumerate(layers):
    pylab.subplot(sub_fig_num, 1, xi + 1)
    if xi == 0:
        pylab.title('V1_2')
    for yin in celltypes:
        spks = spikes['V1_2' + xin + yin]
        pylab.scatter(spks[0], spks[1], c=colors[yin], s=5, edgecolors='none')
        pylab.xlim([0.0, simtime])
    pylab.ylabel(xin)
pylab.savefig('../figs/raster_nonpref' + str(top_down_pyr) + '_' +
              str(top_down_pv) + '_' + str(msd) + '_' + str(fraction) + '_' +
              sim_len + '.eps')
Example #46
def main(subArea, runName, chartTitlePre, ownership):
    outDir = "C:\\Users\\olsenk\\Dropbox\\FPF\\Envision Runs\\keith_production\\"
    varList = [
        ' Early Successional (ha) Forest', ' Pole and Small (ha) Forest',
        ' Medium (ha) Forest', ' Large and Giant (ha) Forest',
        ' Open Canopy (ha) Forest', ' Closed Canopy (ha) Forest'
    ]
    yLabelText = 'Area (%)'
    chartTitle = chartTitlePre
    PMG345Ha = reporterFunc.getPMG345Ha(subArea)
    forestedHa = reporterFunc.getOwnerForestedHa(subArea)
    figTextList = [
        'Early successional', 'Pole and small', 'Medium', 'Large and giant',
        'Open canopy', 'Closed canopy'
    ]

    # list of ownerships to graphs
    ownersToGraph = [
        'Federal', 'State', 'Private Non-Industrial', 'Private Industrial',
        'Tribal', 'Homeowner'
    ]
    reporterName = r'ForestStructure2_by_OWNER_pivot.csv'
    ownerLabelField = ' OWNER_label'

    if ownership == 'All':
        pdfFile = PdfPages(outDir + 'report4_ForestStructure2_landscape.pdf')
    elif ownership in ownersToGraph:
        ownersToGraph = [ownership]
        pdfFile = PdfPages(outDir + 'report4_ForestStructure2_' +
                           ownersToGraph[0] + '.pdf')
    else:
        ownersToGraph = [ownership]
        pdfFile = PdfPages(outDir + 'report4_ForestStructure2_' +
                           ownersToGraph[0] + '.pdf')
        ownerLabelField = ' OWNER_DETL_label'
        reporterName = r'ForestStructure2_by_OWNER_DETL_pivot.csv'

    fig = pl.figure(1, figsize=(8, 6.5))
    for varStruct in varList:
        # setup plot for all scenarios
        ax = fig.add_subplot(2, 3, varList.index(varStruct) + 1)

        for scenario in [
                'CurrentPolicy', 'No_Treatment_Fed', 'Restoration',
                'noFireNoTreatFed'
        ]:
            inDir = outDir + runName + "_" + scenario + "\\"

            if os.path.isdir(inDir):
                # Read the reporter CSV once instead of three times.
                totalArea = pd.io.parsers.read_csv(inDir + reporterName)
                yearList = list(set(totalArea[' Year']))
                repList = list(set(totalArea[' Run']))

                # get stats from multiple reps
                statsList = []
                for year in range(1, max(yearList) + 1):
                    yearArea = totalArea[totalArea[' Year'] == year]

                    dataList = []
                    for rep in repList:
                        repArea = yearArea[yearArea[' Run'] == rep]

                        # sum output over selected ownerships
                        for ownerToGraph in ownersToGraph:
                            if ownerToGraph == ownersToGraph[0]:
                                ownerArea = pd.DataFrame(repArea[
                                    repArea[ownerLabelField] == ownerToGraph])
                                fireProneArea345 = PMG345Ha[ownerToGraph]
                                fireProneArea = forestedHa[ownerToGraph]
                            else:
                                tempArea = repArea[repArea[ownerLabelField] ==
                                                   ownerToGraph]
                                for varName in varList:
                                    #                            totalArea.loc[list(ownerArea.index)[0],varName] += tempArea[varName].iloc[0]
                                    ownerArea[varName].iloc[0] += tempArea[
                                        varName].iloc[0]

                                fireProneArea345 += PMG345Ha[ownerToGraph]
                                fireProneArea += forestedHa[ownerToGraph]
                                ownerToGraph = 'All'

                        if varStruct in [
                                ' Resilient (ha) PMG345',
                                ' Semi-Resilient (ha) PMG345',
                                ' Low Resilience (ha) PMG345'
                        ]:
                            if fireProneArea345 > 0:
                                dataList.append(ownerArea[varStruct].iloc[0] /
                                                fireProneArea345 * 100)
                            else:
                                dataList.append(0.0)
                        else:
                            if fireProneArea > 0:
                                dataList.append(ownerArea[varStruct].iloc[0] /
                                                fireProneArea * 100)
                            else:
                                dataList.append(0.0)

                    # convert to numpy array
                    numpyList = np.array(dataList)
                    lower95th = np.mean(numpyList, axis=0) - (
                        (1.96 * np.std(numpyList, axis=0)) /
                        np.sqrt(len(repList)))
                    upper95th = np.mean(numpyList, axis=0) + (
                        (1.96 * np.std(numpyList, axis=0)) /
                        np.sqrt(len(repList)))

                    if lower95th < 0:
                        lower95th = 0.0

                    # add year data to dictionary
                    dataDict = {
                        'timeStep': year,
                        'mean': np.mean(numpyList, axis=0),
                        'std': np.std(numpyList, axis=0),
                        'lower': lower95th,
                        'upper': upper95th
                    }

                    # convert to list for DataFrame
                    statsList.append(dataDict)

                # convert to DataFrame
                dataTable = pd.DataFrame(statsList)

                plotLegend = (-99, -99)
                if varStruct == ' Pole and Small (ha) Forest':
                    plotLegend = (0.99, 0.33)

                if varList.index(varStruct) >= (len(varList) - 3):
                    labelXtick = True
                else:
                    labelXtick = False

                if varList.index(varStruct) == 0 or varList.index(
                        varStruct) == 3:
                    labelYtick = True
                else:
                    labelYtick = False

                xLabelText = yLabelText = ''
                reporterFunc.plotReporter4(
                    fig, ax, '', pdfFile, dataTable,
                    ['mean', 'lower', 'upper'], xLabelText, yLabelText,
                    scenario, labelXtick, labelYtick, plotLegend,
                    figTextList[varList.index(varStruct)])

    reporterFunc.plotFigureText(fig, 'Simulation Year', 'Area (%)')

    pl.savefig(outDir + 'report4_ForestStructure2_landscape.png',
               bbox_inches='tight',
               dpi=300)
    pdfFile.savefig()
    pl.close()
    pdfFile.close()
    print "Done."
Example #47
def get_cartesian_coords(q1,
                         q2,
                         q1_start_local_left=None,
                         q2_start_local_bottom=None,
                         return_jacobian=False):

    q1_midpoint = 0.5 * (af.max(q1) + af.min(q1))
    q2_midpoint = 0.5 * (af.max(q2) + af.min(q2))

    N_g = domain.N_ghost

    # Default initialisation to rectangular grid
    x = q1
    y = q2
    jacobian = [[1. + 0. * q1, 0. * q1], [0. * q1, 1. + 0. * q1]]
    [[dx_dq1, dx_dq2], [dy_dq1, dy_dq2]] = jacobian

    # Radius and center of circular region
    radius = 0.5
    center = [0, 0]

    if (q1_start_local_left is not None and q2_start_local_bottom is not None):

        x_0 = -0.33333333  #-radius/np.sqrt(2)

        x_y_bottom_left = [x_0, 0.]
        x_y_bottom_center = [0., 0.]
        x_y_bottom_right = [0.33333333, 0.]

        x_y_left_center = [x_0, (1.33333333) / 2]
        x_y_right_center = [0.33333333, (1.33333333) / 2]

        x_y_top_left = [x_0, 1.33333333]
        x_y_top_center = [0., 1.33333333]
        x_y_top_right = [0.33333333, 1.33333333]

        x, y, jacobian = quadratic(
            q1,
            q2,
            x_y_bottom_left,
            x_y_bottom_right,
            x_y_top_right,
            x_y_top_left,
            x_y_bottom_center,
            x_y_right_center,
            x_y_top_center,
            x_y_left_center,
            q1_start_local_left,
            q2_start_local_bottom,
        )

        # Re-unpack the Jacobian returned by quadratic(); otherwise the plots
        # below would show the identity Jacobian from the default
        # initialisation above.
        [[dx_dq1, dx_dq2], [dy_dq1, dy_dq2]] = jacobian

        pl.plot(af.moddims(dx_dq1[0, 0, :, N_g],
                           q1.dims()[2]).to_ndarray(),
                '-o',
                color='C0',
                alpha=0.5,
                label="dx_dq1")
        pl.plot(af.moddims(dy_dq1[0, 0, :, N_g],
                           q1.dims()[2]).to_ndarray(),
                '-o',
                color='k',
                alpha=0.5,
                label="dy_dq1")
        pl.plot(af.moddims(dx_dq2[0, 0, :, N_g],
                           q1.dims()[2]).to_ndarray(),
                '-o',
                color='C2',
                alpha=0.5,
                label="dx_dq2")
        pl.plot(af.moddims(dy_dq2[0, 0, :, N_g],
                           q1.dims()[2]).to_ndarray(),
                '-o',
                color='C3',
                alpha=0.5,
                label="dy_dq2")

        pl.legend(loc='best')

        pl.savefig(
            "/home/quazartech/bolt/example_problems/electronic_boltzmann/test_quadratic/iv.png"
        )
        pl.clf()

        if (return_jacobian):
            return (x, y, jacobian)
        else:
            return (x, y)

    else:
        print(
            "Error in get_cartesian_coords(): q1_start_local_left or q2_start_local_bottom not provided"
        )
Example #48
            for line in ret.split():
                if line[0].isdigit():
                    times[para - 1] = float(line)

        print(times)

        fig = pylab.figure()
        x = range(1, 10)
        y = times
        pylab.plot(x, y)
        pylab.xlabel("Parallelization Factor")
        pylab.ylabel("Execution Time in Seconds")
        pylab.title(
            "Execution Time Generating {0} Images vs Parallelization Factor".
            format(image_num))
        pylab.savefig(hiddencube_home + '/benchmark/para_vs_executiontime.png')
        pylab.close(fig)

    except KeyError:
        print "\t ERROR: You need to set the HIDDENCUBE_HOME environment variable to the path to the home directory of this project.  Execute a command similar to 'export HIDDENCUBE_HOME=/home/ncarey/gitrepos/HiddenCubeDataset'"

        #print "Total: {0}".format(times[0])
#    print "Total: {0}".format(times[-1] - times[0])
#    print "Start Set: {0}".format(times[1] - times[0])
#    print "Sim Set: {0}".format(times[2] - times[1])
#    print "RandRots: {0}".format(times[3] - times[2])
#    print "Color: {0}".format(times[4] - times[3])

#
Example #49
import math
import random

import pylab

# The header of this example was lost in extraction; the setup below is an
# assumed reconstruction. gauss_cut() is taken to draw a standard Gaussian
# sample restricted to [-1, 1] by rejection; alpha and nsteps are placeholders.
def gauss_cut(cut=1.0):
    while True:
        z = random.gauss(0.0, 1.0)
        if abs(z) <= cut:
            return z

alpha = 0.5
nsteps = 100000
samples_x = []
samples_y = []
x = y = 0.0  # start point, so both coordinates exist on the first iteration

for step in range(nsteps):
    if step % 2 == 0:
        while True:
            #x = random.uniform(-1.0, 1.0)
            #p = math.exp(-0.5 * x ** 2 - alpha * x ** 4 )
            
            x=gauss_cut()
            p=math.exp(- alpha * x ** 4 )
            if random.uniform(0.0, 1.0) < p:
                break
    else:
        while True:
            #y = random.uniform(-1.0, 1.0)
            #p = math.exp(-0.5 * y ** 2 - alpha * y ** 4 )
            
            y=gauss_cut()
            p=math.exp(- alpha * y ** 4 )
            
            if random.uniform(0.0, 1.0) < p:
                break
    samples_x.append(x)
    samples_y.append(y)

pylab.hexbin(samples_x, samples_y, gridsize=50, bins=1000)
pylab.axis([-1.0, 1.0, -1.0, 1.0])
cb = pylab.colorbar()
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title('A2_2')
pylab.savefig('plot_A2_2.png')
pylab.show()
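A sanity check one might append to this sampler (a sketch; reuses alpha and the sample lists above): compare the empirical marginal of x against the target density exp(-x**2/2 - alpha*x**4), normalized on [-1, 1].

import numpy

xs = numpy.array(samples_x)
hist, edges = numpy.histogram(xs, bins=50, range=(-1.0, 1.0), density=True)
centers = 0.5 * (edges[:-1] + edges[1:])
target = numpy.exp(-0.5 * centers ** 2 - alpha * centers ** 4)
target /= numpy.trapz(target, centers)  # normalize the target on [-1, 1]
print(numpy.abs(hist - target).max())   # should shrink as nsteps grows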
Example #50
def normalize_dat_file(directory, filename, no_of_spectra_in_bunch,
                       median_filter_window, show_aver_spectra):
    """
    function calculates the average spectrum in DAT file and normalizes all spectra in file to average spectra
    Input parameters:
        directory - name of directory with initial dat file
        filename - name of initial dat file
        no_of_spectra_in_bunch - number of spectra in bunch to read
        median_filter_window - window of median filter to process the average spectrum
        show_aver_spectra - boolean variable which indicates if the picture of average spectrum to be shown and
                            the script paused till the picture window is closed
    Output parameters:
        output_file_name -  name of result normalized .dat file
    """

    print(
        '\n   Preparations and calculation of the average spectrum to normalize... \n'
    )

    output_file_name = directory + 'Norm_' + filename
    filename = directory + filename

    # Opening DAT datafile
    file = open(filename, 'rb')

    # *** Data file header read ***
    df_filesize = os.stat(filename).st_size  # Size of file
    df_filename = file.read(32).decode('utf-8').rstrip(
        '\x00')  # Initial data file name
    file.close()

    if df_filename[-4:] == '.adr':

        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, ReceiverMode, Mode,
            sumDifMode, NAvr, TimeRes, fmin, fmax, df, frequency, FFTsize,
            SLine, Width, BlockSize
        ] = FileHeaderReaderADR(filename, 0, 0)

    if df_filename[-4:] == '.jds':  # If data obtained from DSPZ receiver

        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
            FreqPointsNum, dataBlockSize
        ] = FileHeaderReaderJDS(filename, 0, 0)

    # Calculation of the dimensions of arrays to read
    nx = len(frequency)  # the first dimension of the array
    # The second dimension of the array: (file size minus the 1024-byte header)
    # divided by the size of one spectrum (nx float64 values, 8 bytes each).
    ny = int((df_filesize - 1024) / (nx * 8))

    # Number of data blocks to read from file
    num_of_blocks = int(ny // no_of_spectra_in_bunch)

    # Read data from file by blocks and average it
    file = open(filename, 'rb')
    file.seek(1024)
    average_array = np.empty((nx, 0), float)
    for block in range(num_of_blocks):
        if block == (num_of_blocks - 1):
            spectra_num_in_bunch = ny - (num_of_blocks -
                                         1) * no_of_spectra_in_bunch
        else:
            spectra_num_in_bunch = no_of_spectra_in_bunch

        data = np.fromfile(file,
                           dtype=np.float64,
                           count=nx * spectra_num_in_bunch)
        data = np.reshape(data, [nx, spectra_num_in_bunch], order='F')
        tmp = np.empty((nx, 1), float)
        # Use the per-block minimum (the per-block mean is a commented alternative):
        # tmp[:, 0] = data.mean(axis=1)[:]
        tmp[:, 0] = data.min(axis=1)[:]
        average_array = np.append(average_array, tmp, axis=1)

    # Average the per-block spectra over all data blocks
    average_profile = average_array.mean(axis=1)

    init_average_profile = average_profile.copy()

    # # Make a figure of average spectrum (profile)
    # fig = plt.figure(figsize=(9, 5))
    # ax1 = fig.add_subplot(111)
    # ax1.plot(10 * np.log10(average_profile), linestyle='-', linewidth='1.00', label='Average spectra')
    # ax1.legend(loc='upper right', fontsize=6)
    # ax1.grid(b=True, which='both', color='silver', linestyle='-')
    # ax1.set_xlabel('Frequency points, num.', fontsize=6, fontweight='bold')
    # ax1.set_ylabel('Intensity, dB', fontsize=6, fontweight='bold')
    # pylab.savefig('Averaged_spectra_'+filename[:-4]+'_before_filtering.png', bbox_inches='tight', dpi=160)
    # plt.close('all')

    # Apply median filter to average profile
    average_profile = median_filter(average_profile, median_filter_window)
    med_average_profile = average_profile.copy()
    average_profile = average_filter(average_profile,
                                     median_filter_window + 20)

    # Make a figure of filtered average spectrum (profile)
    fig = plt.figure(figsize=(12, 8))
    ax1 = fig.add_subplot(111)
    ax1.plot(10 * np.log10(init_average_profile),
             linestyle='-',
             linewidth='1.50',
             label='Initial spectra',
             color='C0',
             alpha=0.6)
    ax1.plot(10 * np.log10(med_average_profile),
             linestyle='-',
             linewidth='1.25',
             label='Median spectra',
             color='C1',
             alpha=0.8)
    ax1.plot(10 * np.log10(average_profile),
             linestyle='-',
             linewidth='1.00',
             label='Median averaged spectra',
             color='C3')
    ax1.legend(loc='upper right', fontsize=6)
    ax1.grid(b=True, which='both', color='silver', linestyle='-')
    ax1.set_xlabel('Frequency points, num.', fontsize=6, fontweight='bold')
    ax1.set_ylabel('Intensity, dB', fontsize=6, fontweight='bold')
    pylab.savefig('Averaged_spectra_' + filename[:-4] + '_after_filtering.png',
                  bbox_inches='tight',
                  dpi=160)
    if show_aver_spectra:
        print('\n   Close the figure window to continue processing!!!\n')
        plt.show()
    plt.close('all')

    del init_average_profile, med_average_profile

    # Normalization
    print('   Spectra normalization... \n')
    file.seek(0)
    file_header = file.read(1024)
    normalized_file = open(output_file_name, 'wb')
    normalized_file.write(file_header)
    del file_header

    bar = IncrementalBar(' Normalizing of the DAT file: ',
                         max=num_of_blocks,
                         suffix='%(percent)d%%')
    bar.start()

    for block in range(num_of_blocks):

        if block == (num_of_blocks - 1):
            spectra_num_in_bunch = ny - (num_of_blocks -
                                         1) * no_of_spectra_in_bunch
        else:
            spectra_num_in_bunch = no_of_spectra_in_bunch

        data = np.fromfile(file,
                           dtype=np.float64,
                           count=nx * spectra_num_in_bunch)
        data = np.reshape(data, [nx, spectra_num_in_bunch], order='F')
        for j in range(spectra_num_in_bunch):
            data[:, j] = data[:, j] / average_profile[:]
        temp = data.transpose().copy(order='C')
        normalized_file.write(np.float64(temp))

        bar.next()

    file.close()
    normalized_file.close()
    bar.finish()

    # *** Creating a new timeline TXT file for results ***
    new_tl_file_name = output_file_name.split('_Data_', 1)[0] + '_Timeline.txt'

    # *** Reading timeline file ***
    old_tl_file_name = filename.split('_Data_', 1)[0] + '_Timeline.txt'
    old_tl_file = open(old_tl_file_name, 'r')
    new_tl_file = open(new_tl_file_name, 'w')

    # Read time from timeline file
    time_scale_bunch = old_tl_file.readlines()

    # Saving time data to new file
    for line in time_scale_bunch:
        new_tl_file.write(line)

    old_tl_file.close()
    new_tl_file.close()

    return output_file_name
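A usage sketch (the directory and file name are hypothetical; the function also expects a matching '..._Timeline.txt' file next to the DAT file):

norm_file = normalize_dat_file('DATA/', 'E220213_201439.jds_Data_chA.dat',
                               no_of_spectra_in_bunch=256,
                               median_filter_window=30,
                               show_aver_spectra=False)
print('Normalized file:', norm_file)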
Example #51
def Doplots_diurnal_monthly(mypathforResults,PlottingDF,variable_to_fill, Site_ID,units,item,index_str):
    ANN_label=str(item+"_NN")     #Do Monthly Plot
    print("Doing monthly diurnal plot for month and index ", index_str)
    #Do diurnal plots for every second month (1, 3, 5, 7, 9 and 11),
    #one subplot per month, with an X axis series for all 24 hours
    t = np.arange(1, 25, 1)
    NN_label='Fc'
    Plottemp = PlottingDF[[NN_label,item]]#[PlottingDF['day_night']!=3]
    #Plottemp = PlottingDF[[NN_label,item]].dropna(how='any')

    pl.figure(1)
    for plot_idx, month in enumerate([1, 3, 5, 7, 9, 11]):
        pl.subplot(3, 2, plot_idx + 1)
        pl.title('Diurnal '+item+' month = '+str(month))
        monthly = Plottemp[PlottingDF.index.month == month]
        # Plot each series only if its hourly mean can be computed.
        try:
            xdata1a = monthly[item].groupby([lambda x: x.hour]).mean()
            pl.plot(t, xdata1a, 'r', label=item)
        except:
            pass
        try:
            xdata1b = monthly[NN_label].groupby([lambda x: x.hour]).mean()
            pl.plot(t, xdata1b, 'b', label=NN_label)
        except:
            pass
        pl.ylabel('Flux')

    pl.suptitle('Monthly ANN ensemble diurnal  for  '+item+' at '+Site_ID+ ' index '+index_str)
    pl.subplots_adjust(top=0.85)
    pl.tight_layout()
    pl.savefig(mypathforResults+'/Monthly ANN ensemble diurnal  for  '+item+' at '+Site_ID+ ' index '+index_str)
    #pl.show()
    pl.close()
    pl.close(1)
    time.sleep(1)
Example #52
            alpha=0.1)  #, label=r"layer %u" % (layerNum))

scatCoeff = 1. / applyOpenCLWlenDependentFunction(
    wlens,
    mediumProps.GetScatteringLength(exampleLayerNum),
    useReferenceFunction=True)
cx.plot(wlens,
        1. / scatCoeff,
        linewidth=3.,
        linestyle='-',
        color='k',
        label=r"C++")

scatCoeff = 1. / applyOpenCLMediumPropertyFunction(
    wlens, exampleLayerNum, mediumProps, mode="scatteringLength")
cx.plot(wlens,
        1. / scatCoeff,
        linewidth=1.,
        linestyle='-',
        color='r',
        label=r"OpenCL")

cx.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(20))
cx.set_xlim(260., 690.)
cx.grid(True)
cx.legend(loc='upper right')
cx.set_xlabel(r"wavelength $\lambda [\mathrm{nm}]$")
cx.set_ylabel(r"scattering length $\lambda_\mathrm{scat;geom}$ $[\mathrm{m}]$")

pylab.savefig("medium_properties_Antares.pdf", transparent=False)
Example #53
def Wave1DAnim(snapshots,
               ds,
               dt,
               vel=None,
               filename='wave1danim',
               anim="gif",
               fps=10):
    r"""
    Create an animation file from a matrix resulting from a simulation of a 1d wave field.
    Creates many intermediate files to achieve that, uses ImageMagick

    * snapshots : is a 2d matrix [time][nx] - pressure field
    * ds        : space equal in x axis
    * dt        : time increment between simulation steps
    * vel       : 1d background velocity field
    * filename  : file name for the animation file
    * anim      : animation type (gif or avi)
    * fps       : frames per second
    """

    py.ion()
    #max index time and max index space
    maxt = np.shape(snapshots)[0]
    maxk = np.shape(snapshots)[1]
    # get the maximum and minimum u value to not blow the scale
    # during the movie
    ymax = snapshots.max()
    ymin = snapshots.min()
    # extents of the picture x starts at 0
    xmin, xmax = 0, maxk * ds
    extent = xmin, xmax, ymin, ymax
    # font position
    width = xmax - xmin
    height = ymax - ymin
    posx = 0.8 * width + xmin
    posy = 0.8 * height + ymin
    # not working?
    # verticalalignment='top',
    # horizontalalignment='right',
    _ClearTempImages(filename, "png")  # clear any previous existing
    for t in range(maxt):
        Wave1DPlot(snapshots[t], extent, vel)
        # (py.hold(True) dropped: hold() was removed in matplotlib 3.0 and
        # holding is now the default behaviour)
        # draw time
        py.text(posx, posy, "{0:1.5f}".format(t * dt), alpha=0.8, color='b')
        # output file will be named something like wave1danim001.png
        py.savefig(filename + "{0:03d}".format(t) + '.png', dpi=150)
        sys.stdout.write("\r progressing .. %.1f%%" %
                         (100.0 * float(t) / maxt))
        sys.stdout.flush()
        py.clf()
    sys.stdout.write(" done! \n")
    py.ioff()
    py.close()
    if (anim == "gif"):
        AnimFromPng(filename, fps=fps)
    else:
        AnimFromPng(filename, False, fps)
    _ClearTempImages(filename, "png")
Example #54
                        f"_selfcal{last_selfcal}", "")
                    if "_finaliter" in preselfcal_name:
                        preselfcal_name = preselfcal_name.replace(
                            "_finaliter", "")
                if "_selfcal" in preselfcal_name:
                    raise ValueError("?!?!?!")

                try:
                    with warnings.catch_warnings():
                        warnings.filterwarnings('ignore')
                        ax1, ax2, ax3, fig, diffstats = make_comparison_image(
                            preselfcal_name, postselfcal_name)
                    if not os.path.exists(f"{field}/B{band}/comparisons/"):
                        os.mkdir(f"{field}/B{band}/comparisons/")
                    pl.savefig(
                        f"{field}/B{band}/comparisons/{field}_B{band}_{config}_selfcal{last_selfcal}_comparison.png",
                        bbox_inches='tight')
                except IndexError:
                    raise
                except Exception as ex:
                    log.error(
                        f"Failure for pre={preselfcal_name} post={postselfcal_name}"
                    )
                    log.error((field, band, config, ex))
                    continue

                matchrow = ((tbl['region'] == field) &
                            (tbl['band'] == f'B{band}') &
                            (tbl['array']
                             == ('12Monly' if config == '12M' else config)) &
                            (tbl['robust'] == 'r0.0'))
Example #55
def plot_with_matchingCode(pngfilename,
                           plotter1_big,
                           plotter2_small,
                           biggerFile,
                           smallersizeFile,
                           custom_dpi=90):

    codematches_biggerFile, codematches_smallerFile = ParseMatchingCode(
        biggerFile, smallersizeFile)

    # for x in codematches_biggerFile:
    # print("0x%x - 0x%x" % (x[0],x[1]))

    fig = figure(figsize=(16, 12))
    # fig.tight_layout()

    subplot1 = fig.add_subplot(211)
    subplot2 = fig.add_subplot(212)

    BYTES = FA.load(biggerFile)
    if BYTES is None:
        print("Plot error: file %s not found." % (biggerFile))
        return

    # Byte offsets.
    BYTES_indices = np.arange(0, BYTES.size)

    # subplot.axis('off')
    plotter1_big._generate_plot(BYTES, BYTES_indices, subplot1)

    plotRegion(subplot1, BYTES, BYTES_indices, codematches_biggerFile)

    del BYTES
    del BYTES_indices

    BYTES_SMALLER_FILE = FA.load(smallersizeFile)

    if BYTES_SMALLER_FILE is None:
        print("Plot error: file %s not found." % (smallersizeFile))
        return

    smallfileEnd = BYTES_SMALLER_FILE.size

    # Byte offsets.
    BYTES_indices = np.arange(0, plotter1_big._file_size)

    BYTES = np.zeros((plotter1_big._file_size), dtype=np.uint32)
    BYTES[:smallfileEnd] = BYTES_SMALLER_FILE

    plotter2_small._file_size = plotter1_big._file_size

    plotter2_small._generate_plot(BYTES, BYTES_indices, subplot2)

    plotRegion(subplot2, BYTES, BYTES_indices, codematches_smallerFile)

    subplot2.set_title('{} ({} kB)'.format(plotter2_small._short_filename,
                                           round(1.0 * (smallfileEnd / 1024))))
    axvline(x=smallfileEnd, ymin=0, ymax=255, linewidth=6.0, color='r')
    axvline(x=smallfileEnd, ymin=0, ymax=255, linewidth=2.0, color='k')

    subplot2.add_patch(
        matplotlib.patches.Rectangle((smallfileEnd, 0.0),
                                     (plotter1_big._file_size - smallfileEnd),
                                     255,
                                     fill=True,
                                     color="#cccccc"))

    for ax in fig.axes:
        matplotlib.pyplot.sca(ax)
        xticks(rotation=315)

    subplots_adjust(left=0.1,
                    right=0.9,
                    top=0.9,
                    bottom=0.1,
                    wspace=0.3,
                    hspace=0.3)

    del BYTES
    del BYTES_SMALLER_FILE
    del BYTES_indices

    legend(loc='upper right')

    # Extra wishes to Matplotlib:
    #    don't waste so much white space, and put the legend to upper right.

    savefig("/ram/%s" % (pngfilename), dpi=custom_dpi)
    close(fig)
    time.sleep(0.5)

    clf()

    return
Example #56
def Detect_Logo_SIFT_Video(args):

    Debug = int(args.debug)
    logo = cv2.imread(args.logo)  # logo image
    scale = 4
    logo = cv2.resize(logo,
                      None,
                      fx=scale,
                      fy=scale,
                      interpolation=cv2.INTER_CUBIC)

    vinput = args.input  # input video
    if not os.path.isfile(vinput):
        logging.error('---video does not exist---')
        sys.exit(1)

    cap = cv2.VideoCapture(vinput)
    logging.warning(
        '***************************************Opening the video: ' +
        args.input +
        ' for TV Logo detection**********************************************')
    fintv = float(args.frequency)
    fps = cap.get(5)  # frame per second in video
    frn = int(cap.get(7))  # frame number

    outputpath = args.outputpath
    if outputpath != '' and not os.path.exists(outputpath):
        os.makedirs(outputpath)

    # verify beginning and end time
    if args.beginning is None:
        bese = 0
    else:
        bese = args.beginning
    if args.end is None:
        endse = (frn / fps)
    else:
        endse = args.end
    if bese >= endse or bese < 0 or endse > (frn / fps):
        logging.error('wrong arguments of beginning and end time')
        sys.exit(1)

    logging.info('process each segment of video {0}'.format(args.input))
    befr = int(bese * fps)  # begining frame
    endfr = int(endse * fps)  # ending frame

    n_matches = []
    frames = []
    if cap.isOpened():  # if video is opened, process frames
        ret, frame = cap.read()
        counter = 0
        #print('endfr = %d' % endfr + 'endse %d ' % endse + 'fps %d' %fps + 'frn %d' %frn )
        for i in range(befr, endfr, int(np.round(fps / fintv))):
            #print('i = %d' %i + '/ %d' %frn)
            while (counter != i):
                #print('counter = %d' %counter)
                ret, frame = cap.read()
                counter += 1
            #Crop the image to the ROI and zoom for better detection performances
            x1 = int(args.x1)
            x2 = int(args.x2)
            y1 = int(args.y1)
            y2 = int(args.y2)
            #print('Crop image to x1 = %d' %x1 + 'x2 = %d' %x2+'y1 = %d' %y1 + 'y2 = %d'  %y2)
            frame_ROI = frame[x1:x2, y1:y2]
            cv2.imwrite('frame_ROI.png', frame_ROI)
            scale = 4
            #frame_ROI = cv2.resize(frame_ROI, None, fx= scale, fy= scale, interpolation=cv2.INTER_CUBIC)

            #n_matches.append(Detect_Logo_SIFT_Frame(logo,frame_ROI,Debug, i))
            #n_matches.append(Detect_Logo_SURF_Frame(logo,frame_ROI,Debug, i))
            #n_matches.append(Detect_Logo_ORB_Frame(logo,frame_ROI,Debug, i))
            n_matches.append(Detect_Logo_BRISK_Frame(logo, frame_ROI, Debug,
                                                     i))
            #n_matches.append(Detect_Logo_FREAK_Frame(logo,frame_ROI,Debug, i))

            frames.append(int(i / fps))

    pl.figure(figsize=(30, 4))
    chunckname_wextension = os.path.basename(args.input)
    chunckname = chunckname_wextension.split('.')[0]
    if not os.path.isfile('/opt/exe/textocr/demo/Chunks/GroundTruth/' +
                          chunckname + '_Pub_GroundTruth.txt'):
        logging.warning(
            'No ground Truth file found for commercial adds detection')
        pl.plot(frames, n_matches, 'r')
    else:
        GT = np.loadtxt('/opt/exe/textocr/demo/Chunks/GroundTruth/' +
                        chunckname + '_Pub_GroundTruth.txt')
        GT = GT * (max(n_matches))
        print('GT dimension %d' % np.shape(GT))
        print('histo_array dimension %d' % np.shape(n_matches))
        pl.plot(frames, n_matches, 'r', label='Logo match')
        #pl.plot(frames, GT, 'g', label='Ground Truth')
        pl.legend()

    pl.savefig(os.path.join(outputpath, args.outputname + "_logoMatch.jpg"),
               dpi=50)
    pl.show()
Example #57
pl.rcParams["axes.labelsize"] = 18

au_in_si = 1.496e11
yr_in_si = (365.25 * 24. * 3600.)

widths = [1, 2, 3, 4, 5]

fig, ax = pl.subplots(5, 1, sharex=True, sharey=True)

for i in range(5):
    for res in [300, 900, 2700, 5400]:
        file = "convergence_instability_w{0}_{1}_radius.dat".format(
            widths[i], res)
        print "Plotting", file, "..."

        fp = np.memmap(file, dtype='d', mode='r')
        data = fp.reshape((-1, 3))

        ax[i].plot(data[:, 0] / yr_in_si,
                   data[:, 1] / au_in_si,
                   label="{0} cells".format(res))
    ax[i].axhline(y=30, linestyle="--", color="k")
    ax[i].axhline(y=10, linestyle="-", color="k")
    ax[i].set_title("$W = {0}$ AU".format(widths[i]))
    ax[i].set_ylabel("$R_I$ (AU)")

ax[4].set_xlabel("$t$ (yr)")
ax[0].legend(loc="lower left", ncol=2)
pl.tight_layout()
pl.savefig("fig_convergence_instability.png")
Example #58
def Wave2DAnim(snapshots,
               ds,
               dt,
               vel,
               filename='wave2danim',
               norm=True,
               vmin=None,
               vmax=None,
               anim="avi",
               fps=15):
    r"""
    Create an animation file from a matrix resulting from a simulation of a 2d wave field.
    Creates many intermediate files to achieve that, uses ImageMagick.
    Z is downward.

    * snapshots : is a 3d matrix [time][nz][nx] - pressure field
    * ds        : space equal in x/y axis
    * dt        : time increment between simulation steps
    * vel       : 2d background velocity field
    * filename  : file name for the animation file
    * anim      : animation type (gif or avi)
    * fps       : frames per second
    * norm      : scale the values getting the general max and min (vmax/vmin)
    * vmin      : global minimum of snapshots
    * vmax      : global maximum of snapshots
    """
    py.ion()
    #max index time and max index space
    maxt = np.shape(snapshots)[0]
    maxk = np.shape(snapshots)[1]
    maxi = np.shape(snapshots)[2]
    if norm:
        # get the maximum and minimum values of the last 5%
        # snapshots to not blow the scale during the animation
        snaptmp = snapshots[-int(0.05 * maxt):]
        vmax = snaptmp.max()
        vmin = snaptmp.min()

        print "vmin : ", vmin, "vmax : ", vmax
    # space axes start at 0 in x and z (z is plotted on the y axis)
    # extents of the picture
    xmin, xmax = 0, ds * maxi
    ymin, ymax = 0, ds * maxk
    extent = xmin, xmax, ymax, ymin
    # font position
    width = xmax - xmin
    height = ymax - ymin
    posx = 0.8 * width + xmin
    posz = 0.8 * height + ymin
    # not working?
    # verticalalignment='top',
    # horizontalalignment='right'
    _ClearTempImages(filename, "png")  # clear any previous existing
    for t in range(maxt):
        # (py.hold() was removed in matplotlib 3.0; holding is now the default)
        py.imshow(vel,
                  interpolation='bilinear',
                  cmap=cm.jet,
                  extent=extent,
                  origin='upper',
                  aspect='auto')
        py.imshow(snapshots[t],
                  interpolation='bilinear',
                  cmap=cm.Greys_r,
                  alpha=0.8,
                  extent=extent,
                  origin='upper',
                  aspect='auto',
                  vmin=vmin,
                  vmax=vmax)
        # optional cmap=cm.jet, apect='auto' adjust aspect to the previous plot
        py.show()
        # draw time
        py.text(posx,
                posz,
                "{0:1.5f}".format(t * dt),
                alpha=0.8,
                style='italic',
                color='b')
        # output file will be named something like wave2danim001.png
        py.savefig(filename + "{0:03d}".format(t) + '.png', dpi=150)
        sys.stderr.write("\r progressing .. %.1f%%" %
                         (100.0 * float(t) / maxt))
        sys.stderr.flush()
        py.clf()
    sys.stdout.write(" done! \n")
    py.ioff()
    py.close()
    if (anim == "gif"):
        AnimFromPng(filename, fps=fps)
    else:
        AnimFromPng(filename, False, fps)
    _ClearTempImages(filename, "png")
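A minimal driver sketch for Wave2DAnim (all values are made up; assumes ImageMagick and the AnimFromPng/_ClearTempImages helpers from the surrounding module are available):

import numpy as np

nt, nz, nx = 60, 50, 80
snapshots = np.random.randn(nt, nz, nx)   # stand-in for a simulated field
vel = 1500.0 * np.ones((nz, nx))          # uniform background velocity
Wave2DAnim(snapshots, ds=10.0, dt=0.001, vel=vel, anim="gif", fps=10)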
Example #59
def multi_way(A=64):

    pylab.ion()
    pylab.figure(2, figsize=figsize)
    pylab.clf()

    qvec = [0.001, 0.01, 0.1]

    cvec = ['b', 'c', 'g']

    m = np.logspace(0, 5)
    pylab.loglog(m, m, 'k:', linewidth=lw)

    for (q, c) in zip(qvec, cvec):
        print(q)
        p = 1 - q

        R = np.floor(np.log(q) / np.log(1 - q))

        B = p**np.arange(1, R)
        D = np.ones(len(B))

        B = np.concatenate([B, q * p**np.arange(0, R)])
        D = np.concatenate([D, np.arange(R)])

        for pow in range(2, 88):
            B = np.concatenate([B, q**pow * p**np.arange(0, R)])
            D = np.concatenate([D, sm.comb(np.arange(R), pow)])
            assert len(B) == len(D), 'len(B) != len(D)'
            if len(B) > 10**8:
                print(pow, 'breaking')
                break

        B = np.concatenate(([1], B))
        D = np.concatenate(([1], D))
        i = B.argsort()[::-1]
        B = (D[i] * B[i]).cumsum()
        D = D[i].cumsum()
        j = np.nonzero((D <= 10**5))[0]
        #pylab.loglog(np.arange(A, 100001), A*C[np.arange(A-1, 100000)/A])
        pylab.loglog(np.concatenate(([1], A * D[j])),
                     np.concatenate(([1], A * B[j])),
                     c,
                     linewidth=lw)
        pylab.draw()

    pylab.loglog(np.concatenate(([1], m * A)),
                 np.concatenate(([1], np.log2(m + 1) * A)),
                 'purple',
                 linewidth=lw)

    pylab.xlabel('Number of cores', fontsize=fs)
    pylab.ylabel('Expected speedup', fontsize=fs)
    pylab.title('Expected speedup with %d-way parallelism' % A, fontsize=fs)
    pylab.legend(['$E[S_J] = J$'] + [('$q = %1.4f' % q).strip('0') + '$'
                                     for q in qvec] + ['$q = 0.5$'],
                 loc='upper left',
                 fontsize=fs)
    pylab.xticks(fontsize=fs)
    pylab.yticks(fontsize=fs)
    pylab.axis((1, 10**4, 1, 10**4))
    pylab.savefig('../figs/speedup-%d.pdf' % A)
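A usage sketch for multi_way (figsize, lw and fs are module-level plot settings in the original source; the values below are guesses, and sm is assumed to be scipy.special, which provides comb()):

import numpy as np
import pylab
import scipy.special as sm   # sm.comb as used above

figsize, lw, fs = (8, 6), 2.0, 12   # assumed module-level plot settings
multi_way(A=64)                     # writes ../figs/speedup-64.pdf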
Example #60
def grid_plot(ref,
              sweep,
              plot_filename=None,
              shapefiles=None,
              interactive=False):

    # Set up colormaps

    from matplotlib.colors import BoundaryNorm

    cmapr = _ref_ctable
    cmapr.set_bad('white', 1.0)
    cmapr.set_under('white', 1.0)

    normr = BoundaryNorm(_ref_scale, cmapr.N)

    # Create png file label

    if plot_filename is None:
        print(
            "\n pyROTH.grid_plot:  No output file name is given, writing to %s"
            % "RF_...png")
        filename = "RF_%2.2d_plot.%s" % (sweep, _plot_format)
    else:
        filename = "%s.%s" % (plot_filename, _plot_format)

    fig, axes = plt.subplots(1, 2, sharey=True, figsize=(18, 10))

    # Set up coordinates for the plots

    sw_lon = ref.lons.min()
    ne_lon = ref.lons.max()
    sw_lat = ref.lats.min()
    ne_lat = ref.lats.max()

    bgmap = Basemap(projection='lcc', llcrnrlon=sw_lon,llcrnrlat=sw_lat,urcrnrlon=ne_lon,urcrnrlat=ne_lat, \
                    lat_0=0.5*(ne_lat+sw_lat), lon_0=0.5*(ne_lon+sw_lon), resolution='i', area_thresh=10., ax=axes[0])

    xg, yg = bgmap(ref.lons, ref.lats)

    yg_2d, xg_2d = np.meshgrid(yg, xg)

    yg_2d = yg_2d.transpose()
    xg_2d = xg_2d.transpose()

    # fix xg, yg coordinates so that pcolormesh plots them in the center.

    dx2 = 0.5 * (xg[1] - xg[0])
    dy2 = 0.5 * (yg[1] - yg[0])

    xe = np.append(xg - dx2, [xg[-1] + dx2])
    ye = np.append(yg - dy2, [yg[-1] + dy2])

    # CAPPI REFLECTVITY PLOT

    if shapefiles:
        plot_shapefiles(bgmap,
                        shapefiles=shapefiles,
                        counties=_plot_counties,
                        ax=axes[0])
    else:
        plot_shapefiles(bgmap, counties=_plot_counties, ax=axes[0])

    # pixelated plot

    ref_data = ref.data[sweep]

    # im1  = bgmap.pcolormesh(xe, ye, ref_data, cmap=cmapr, norm=normr, vmin = _ref_min_plot, vmax = _ref_scale.max(), ax=axes[0])
    # cbar = bgmap.colorbar(im1,location='right')
    # cbar.set_label('Reflectivity (dBZ)')
    # axes[0].set_title('Pixel Reflectivity at Height:  %4.1f km' % 0.001*ref.zg[sweep])
    # at = AnchoredText("Max dBZ: %4.1f" % (ref_data.max()), loc=4, prop=dict(size=10), frameon=True,)
    # at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    # axes[0].add_artist(at)

    # Contour filled plot

    # bgmap = Basemap(projection='lcc', llcrnrlon=sw_lon,llcrnrlat=sw_lat,urcrnrlon=ne_lon,urcrnrlat=ne_lat, \
    #                 lat_0=0.5*(ne_lat+sw_lat), lon_0=0.5*(ne_lon+sw_lon), resolution='i', area_thresh=10.0, ax=axes[1])

    # if shapefiles:
    #     plot_shapefiles(bgmap, shapefiles=shapefiles, counties=_plot_counties, ax=axes[1])
    # else:
    #     plot_shapefiles(bgmap, counties=_plot_counties, ax=axes[1])
    #

    im1 = bgmap.contourf(xg_2d, yg_2d, ref_data, levels= _ref_scale, cmap=cmapr, norm=normr, \
                         vmin = _ref_min_plot, vmax = _ref_scale.max(), ax=axes[0])

    cbar = bgmap.colorbar(im1, location='right')
    cbar.set_label('Reflectivity (dBZ)')
    axes[0].set_title('Reflectivity at Height:  %4.1f km' %
                      (0.001 * ref.zg[sweep]))
    at = AnchoredText(
        "Max dBZ: %4.1f" % (ref_data.max()),
        loc=4,
        prop=dict(size=10),
        frameon=True,
    )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    axes[0].add_artist(at)

    # COMPOSITE PLOT

    ref_data = ref.data.max(axis=0)
    zero_dbz = ref.zero_dbz.data

    bgmap = Basemap(projection='lcc', llcrnrlon=sw_lon,llcrnrlat=sw_lat,urcrnrlon=ne_lon,urcrnrlat=ne_lat, \
                    lat_0=0.5*(ne_lat+sw_lat), lon_0=0.5*(ne_lon+sw_lon), resolution='i', area_thresh=10.0, ax=axes[1])

    if shapefiles:
        plot_shapefiles(bgmap,
                        shapefiles=shapefiles,
                        counties=_plot_counties,
                        ax=axes[1])
    else:
        plot_shapefiles(bgmap, counties=_plot_counties, ax=axes[1])

# pixelated plot

# im1  = bgmap.pcolormesh(xe, ye, ref_data, cmap=cmapr, norm=normr, vmin = _ref_min_plot, vmax = _ref_scale.max(), ax=axes[2])
# cbar = bgmap.colorbar(im1,location='right')
# cbar.set_label('Reflectivity (dBZ)')
# axes[2].set_title('Pixel Composite Reflectivity at Height:  %4.1f km' % 0.001*ref.zg[sweep])
# at = AnchoredText("Max dBZ: %4.1f" % (ref_data.max()), loc=4, prop=dict(size=10), frameon=True,)
# at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
# axes[2].add_artist(at)

# contoured plot

# bgmap = Basemap(projection='lcc', llcrnrlon=sw_lon,llcrnrlat=sw_lat,urcrnrlon=ne_lon,urcrnrlat=ne_lat, \
#                 lat_0=0.5*(ne_lat+sw_lat), lon_0=0.5*(ne_lon+sw_lon), resolution='i', area_thresh=10.0, ax=axes[3])

# if shapefiles:
#     plot_shapefiles(bgmap, shapefiles=shapefiles, counties=_plot_counties, ax=axes[3])
# else:
#     plot_shapefiles(bgmap, counties=_plot_counties, ax=axes[3])

    im1 = bgmap.contourf(xg_2d, yg_2d, ref_data, levels= _ref_scale, cmap=cmapr, norm=normr, \
                         vmin = _ref_min_plot, vmax = _ref_scale.max(), ax=axes[1])

    cbar = bgmap.colorbar(im1, location='right')
    cbar.set_label('Reflectivity (dBZ)')
    axes[1].set_title('Composite Reflectivity')
    at = AnchoredText(
        "Max dBZ: %4.1f" % (ref_data.max()),
        loc=4,
        prop=dict(size=10),
        frameon=True,
    )
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    axes[1].add_artist(at)

    # Plot zeros as "o"

    if zero_dbz is not None:
        r_mask = (zero_dbz.mask == False)
        print("\n Number of zero reflectivity obs found:  %d" % np.sum(r_mask))
        bgmap.scatter(xg_2d[r_mask], yg_2d[r_mask], s=15, facecolors='none', \
                      edgecolors='k', alpha=0.3, ax=axes[1])

    # Get other metadata....for labeling

    time_text = ref.time.strftime('%Y-%m-%d %H:%M')

    title = '\nDate:  %s   Time:  %s' % (time_text[0:10], time_text[10:19])

    plt.suptitle(title, fontsize=18)

    plt.savefig(filename)

    if interactive: plt.show()