Example #1
def link_level_bars(levels, usages, quantiles, scheme, direction, color, nnames, lnames, admat=None):
    """
    Bar plots of nodes' usage of links at different levels.
    """
    if admat is None:
        admat = np.genfromtxt('./settings/eadmat.txt')
    if color == 'solar':
        cmap = Oranges_cmap
    elif color == 'wind':
        cmap = Blues_cmap
    elif color == 'backup':
        cmap = 'Greys'
    nodes, links = usages.shape
    usageLevels = np.zeros((nodes, levels))
    usageLevelsNorm = np.zeros((nodes, levels))
    for node in range(nodes):
        nl = neighbor_levels(node, levels, admat)
        for lvl in range(levels):
            ll = link_level(nl, lvl, nnames, lnames)
            ll = np.array(ll, dtype='int')
            usageSum = sum(usages[node, ll])
            linkSum = sum(quantiles[ll])
            usageLevels[node, lvl] = usageSum / linkSum
            if lvl == 0:
                usageLevelsNorm[node, lvl] = usageSum
            else:
                usageLevelsNorm[node, lvl] = usageSum / usageLevelsNorm[node, 0]
        usageLevelsNorm[:, 0] = 1

    # plot all nodes
    usages = usageLevels.transpose()
    plt.figure(figsize=(11, 3))
    ax = plt.subplot()
    plt.pcolormesh(usages[:, loadOrder], cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(1, nodes, nodes))
    ax.set_xticklabels(loadNames, rotation=60, ha="right", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.savefig(figPath + '/levels/' + str(scheme) + '/' + 'total' + '_' + str(direction) + '_' + color + '.pdf', bbox_inches='tight')
    plt.close()

    # plot all nodes normalised to usage of first level
    usages = usageLevelsNorm.transpose()
    plt.figure(figsize=(11, 3))
    ax = plt.subplot()
    plt.pcolormesh(usages[:, loadOrder], cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(1, nodes, nodes))
    ax.set_xticklabels(loadNames, rotation=60, ha="right", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.savefig(figPath + '/levels/' + str(scheme) + '/' + 'total_norm_cont_' + str(direction) + '_' + color + '.pdf', bbox_inches='tight')
    plt.close()
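A minimal, self-contained sketch of the pcolormesh-plus-colorbar pattern used above, with a synthetic usage matrix and made-up node names in place of the real data; the half-offset ticks put the labels at cell centres:

import numpy as np
import matplotlib.pyplot as plt

levels, nodes = 4, 10
usage = np.random.rand(levels, nodes)          # stand-in for usageLevels.transpose()

fig, ax = plt.subplots(figsize=(11, 3))
mesh = ax.pcolormesh(usage, cmap='Greys')
fig.colorbar(mesh).set_label(label=r'$U_n^{(l)}$', size=11)

# Half-offset ticks place the labels at cell centres rather than cell edges.
ax.set_yticks(np.linspace(.5, levels - .5, levels))
ax.set_yticklabels(range(1, levels + 1))
ax.set_xticks(np.linspace(.5, nodes - .5, nodes))
ax.set_xticklabels(['node %d' % i for i in range(nodes)], rotation=60, ha='right')
ax.set_ylabel('Link level')
plt.show()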
Example #2
 def _plot(self):
     p = plt.pcolor(self.matrix, cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
     plt.colorbar(p)
     plt.xlim((0, self.matrix.shape[1]))
     plt.ylim((0, self.matrix.shape[0]))
     if self.labels is not None:
         plt.xticks(numpy.arange(0.5, len(self.labels) + 0.5), self.labels, fontsize=self.fontsize, rotation=90)
         plt.yticks(numpy.arange(0.5, len(self.labels) + 0.5), self.labels, fontsize=self.fontsize)
Example #3
 def plot_mat(self, mat, fn):
     plt.matshow(asarray(mat.todense()))
     plt.axis('equal')
     sh = mat.shape
     plt.gca().set_yticks(range(0, sh[0]))
     plt.gca().set_xticks(range(0, sh[1]))
     plt.grid('on')
     plt.colorbar()
     plt.savefig(join(self.outs_dir, fn))
     plt.close()
Example #4
 def plot_mat(self, mat, fn):
     plt.matshow(asarray(mat.todense()))
     plt.axis('equal')
     sh = mat.shape
     plt.gca().set_yticks(range(0,sh[0]))
     plt.gca().set_xticks(range(0,sh[1]))
     plt.grid('on')
     plt.colorbar()
     plt.savefig(join(self.outs_dir, fn))
     plt.close()
Example #5
def show_melspectrogram(conf, mels, title='Log-frequency power spectrogram'):
    librosa.display.specshow(mels,
                             x_axis='time',
                             y_axis='mel',
                             sr=conf.sampling_rate,
                             hop_length=conf.hop_length,
                             fmin=conf.fmin,
                             fmax=conf.fmax)
    plt.colorbar(format='%+2.0f dB')
    plt.title(title)
    plt.show()
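A hypothetical usage sketch for show_melspectrogram; the conf fields and the synthetic signal below are illustrative assumptions, not part of the original:

from types import SimpleNamespace
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt

conf = SimpleNamespace(sampling_rate=22050, hop_length=512, fmin=20, fmax=11025)
y = np.random.randn(conf.sampling_rate * 2)        # two seconds of noise as a stand-in signal
S = librosa.feature.melspectrogram(y=y, sr=conf.sampling_rate,
                                   hop_length=conf.hop_length,
                                   fmin=conf.fmin, fmax=conf.fmax)
mels = librosa.power_to_db(S, ref=np.max)          # convert power to dB before displaying
show_melspectrogram(conf, mels)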
Example #6
def main():

    r = compute()
    fig = plt.figure(figsize=(8, 6))
    ax = plt.subplot(1, 1, 1, aspect=1 / 21)
    im = ax.imshow(r, origin="upper", extent=[0, 1, 1, 0], cmap=plt.cm.gray_r)
    plt.colorbar(im)

    ax.set_ylabel("Proportion of beach seen")
    ax.set_xlabel("Position")

    plt.savefig("{}/a_priori_x_vs_r_{}.pdf".format(folder, mode))
    plt.show()
Example #7
def main():
    r = compute()
    fig = plt.figure(figsize=(8, 6))
    ax = plt.subplot(1, 1, 1, aspect=1 / 21)
    im = ax.imshow(r, origin="lower", extent=[0, 1, 0, 1], cmap=plt.cm.gray_r)
    plt.colorbar(im)
    plt.title(
        cond.replace("_", " ").capitalize() +
        ", p={}, mode='{}'".format(fov, mode.replace("_", " ")))

    ax.set_xlabel("x2")
    ax.set_ylabel("x1")
    plt.savefig("../data/figures/a_priori_x_vs_x_{}_{}_{}.pdf".format(
        cond, fov * 100, mode))
    plt.show()
Example #8
 def plot_slip(self, m, cmap=None, clim=None, zorder=0,
               cb_shrink = 0.8,
               cb_pad = 0.2
               ):
     self.pcolor_on_fault(m, cmap = cmap, zorder=zorder)
     cb = plt.colorbar(shrink=cb_shrink, pad=cb_pad)
     plt.clim(clim)
     cb.set_label('slip(m)')
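A standalone illustration of the colorbar shrink/pad knobs and plt.clim used above, with random data in place of the fault plot:

import numpy as np
import matplotlib.pyplot as plt

plt.pcolor(np.random.rand(20, 20))
cb = plt.colorbar(shrink=0.8, pad=0.2)   # shorter bar, pushed a bit further from the axes
plt.clim(0.0, 1.0)                       # colour limits applied to the current image
cb.set_label('slip (m)')
plt.show()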
Example #9
def convert_all_to_png(vis_path, out_dir="maps_png", size=None):

    units = {
        'gas_density': 'Gas Density [g/cm$^3$]',
        'Tm': 'Temperature [K]',
        'Tew': 'Temperature [K]',
        'S': 'Entropy []',
        'dm': 'DM Density [g/cm$^3$]',
        'v': 'Velocity [km/s]'
    }

    log_list = ['gas_density']

    for vis_file in os.listdir(vis_path):
        if ".dat" not in vis_file:
            continue
        print "converting %s" % vis_file
        map_type = re.search('sigma_(.*)_[xyz]', vis_file).group(1)

        (image, pixel_size,
         axis_values) = read_visualization_data(vis_path + "/" + vis_file,
                                                size)
        print "image width in Mpc/h: ", axis_values[-1] * 2.0

        x, y = np.meshgrid(axis_values, axis_values)

        cmap_max = image.max()
        cmap_min = image.min()
        ''' plotting '''
        plt.figure(figsize=(5, 4))

        if map_type in log_list:
            plt.pcolor(x, y, image, norm=LogNorm(vmax=cmap_max, vmin=cmap_min))
        else:
            plt.pcolor(x, y, image, vmax=cmap_max, vmin=cmap_min)

        cbar = plt.colorbar()
        if map_type in units.keys():
            cbar.ax.set_ylabel(units[map_type])

        plt.axis(
            [axis_values[0], axis_values[-1], axis_values[0], axis_values[-1]])

        del image

        plt.xlabel(r"$Mpc/h$", fontsize=18)
        plt.ylabel(r"$Mpc/h$", fontsize=18)

        out_file = vis_file.replace("dat", "png")

        plt.savefig(out_dir + "/" + out_file, dpi=150)

        plt.close()
        plt.clf()
Example #10
def convert_all_to_png(vis_path, out_dir = "maps_png", size = None) :

    units = { 'gas_density' : 'Gas Density [g/cm$^3$]',
              'Tm' : 'Temperature [K]',
              'Tew' : 'Temperature [K]',
              'S' : 'Entropy []',
              'dm' : 'DM Density [g/cm$^3$]',
              'v' : 'Velocity [km/s]' }

    log_list = ['gas_density']

    for vis_file in os.listdir(vis_path) :
        if ".dat" not in vis_file :
            continue
        print "converting %s" % vis_file
        map_type = re.search('sigma_(.*)_[xyz]', vis_file).group(1)

        (image, pixel_size, axis_values) = read_visualization_data(vis_path+"/"+vis_file, size)
        print "image width in Mpc/h: ", axis_values[-1]*2.0

        x, y = np.meshgrid( axis_values, axis_values )

        cmap_max = image.max()
        cmap_min = image.min()


        ''' plotting '''
        plt.figure(figsize=(5,4))

        if map_type in log_list:
            plt.pcolor(x,y,image, norm=LogNorm(vmax=cmap_max, vmin=cmap_min))
        else :
            plt.pcolor(x,y,image, vmax=cmap_max, vmin=cmap_min)

        cbar = plt.colorbar()
        if map_type in units.keys() :
            cbar.ax.set_ylabel(units[map_type])

        plt.axis([axis_values[0], axis_values[-1],axis_values[0], axis_values[-1]])

        del image

        plt.xlabel(r"$Mpc/h$", fontsize=18)
        plt.ylabel(r"$Mpc/h$", fontsize=18)

        out_file = vis_file.replace("dat", "png")

        plt.savefig(out_dir+"/"+out_file, dpi=150 )

        plt.close()
        plt.clf()
Example #11
    def _plot(self):
        (binsX, binsY) = (self.bins, self.bins) if isinstance(self.bins, int) else self.bins
        X, Y = self.data
        H, ex, ey = numpy.histogram2d(X, Y, bins=(binsX, binsY))
        x_center = numpy.diff(ex) / 2 + ex[0:-1]
        x_digit = numpy.digitize(X, ex)
        y_center = numpy.empty(binsY)
        y_std = numpy.empty(binsY)
        for i in range(binsX):
            y_pop = Y[numpy.where(x_digit == i + 1)[0]]
            y_center[i] = numpy.mean(y_pop)
            y_std[i] = numpy.std(y_pop)

        plt.hexbin(X, Y, bins='log')
        plt.errorbar(x_center, y_center, y_std, fmt='r-')
        cb = plt.colorbar()
        plt.xlim(ex[0], ex[-1])
        cb.set_label('log10(N)')
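A self-contained sketch of the same idea, a hexbin density with a per-bin mean/std overlay, on synthetic data (bins='log' colours the hexagons by log10 of the count, hence the colorbar label):

import numpy
import matplotlib.pyplot as plt

X = numpy.random.uniform(-3, 3, 10000)
Y = X ** 2 + numpy.random.randn(10000)
bins = 20

H, ex, ey = numpy.histogram2d(X, Y, bins=bins)
x_center = numpy.diff(ex) / 2 + ex[:-1]
x_digit = numpy.digitize(X, ex)
y_center = numpy.array([Y[x_digit == i + 1].mean() for i in range(bins)])
y_std = numpy.array([Y[x_digit == i + 1].std() for i in range(bins)])

plt.hexbin(X, Y, bins='log')                  # hexagonal density, coloured by log10(N)
plt.errorbar(x_center, y_center, y_std, fmt='r-')
plt.colorbar().set_label('log10(N)')
plt.xlim(ex[0], ex[-1])
plt.show()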
Example #12
def runAnalysis( caseDir , backbone , timeFactor ):

    # Retrieve info on the peptide
    resNames = backbone.resnames()
    
    # Go through each residue connection
    for i in range( 1, len(resNames) ):
        
        # User info
        print "Plotting dihedrals for residue: "+str(i)
        
        # Paths for the two files
        psiPath = caseDir+"/analysis/data/psi_"+str(i)  
        phiPath = caseDir+"/analysis/data/phi_"+str(i)

        # Common Plot command
        myPlot.plotData( 
            caseDir+"/analysis/plots" , 
            "Dihedral Angles. Res ID: "+str(i)+", Residue name: "+str(resNames[i]), 
            ["$\Psi_"+str(i)+"$","$\Phi_"+str(i)+"$"],
            [ psiPath , phiPath ] , 
            "Angle (Degrees)", 
            xFactor = timeFactor,
            scatter = True ,
            skipLines = 1,
            legendFrame = 1,
            legendAlpha = 1
        )
 
        # Create a Ramachandran plot
        ############################

        # User info
        print "Creating a Ramachandran plot for residue: "+str(i) 
 
        # Get the components
        components = []
        for path in [ phiPath, psiPath ]:
            components.append([])
            index = len(components)-1
            with open(path, "r") as fi:
                lines = fi.readlines()
                for line in lines:
                    temp = line.split()
                    try:
                        components[ index ].append( float( temp[1] ) )
                    except ValueError:
                        print "Reading Header: ",line
        
        # Set to numpy
        np_arrays = [ np.array( component ) for component in components ]  
        
        # Do the plotting
        title = "Ramachandran Plot. Res ID: "+str(i)+", Residue name: "+str(resNames[i])
        pp = PdfPages( caseDir+"/analysis/plots/"+title+".pdf" )
        fig = plt.figure() #figsize=(8,6)
        ax = fig.gca()
        ax.set_xlabel("$\Phi_"+str(i)+"$", fontsize=12)
        ax.set_ylabel("$\Psi_"+str(i)+"$", fontsize=12)
        
        # Create the histogram without plotting, so we can set the units properly   
        boltzman = 0.0019872041
        temperature = 300
        H, xedges, yedges = np.histogram2d(np_arrays[1], np_arrays[0], bins=100 )
        H_normalized = H/len(np_arrays[0])
        H = -1 * boltzman * temperature * (np.log( H_normalized )-np.log(np.max(H_normalized)))
        
        # Now plot the 2d histogram
        img = ax.imshow(H,  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
        colorbar = plt.colorbar(img, ax=ax)
        colorbar.set_label("Kcal / mol")
        
        # For normal histogram plot
        #plt.hist2d(np_arrays[0], np_arrays[1], bins=100) 
        #plt.colorbar()
                
        plt.ylim([-180,180])
        plt.xlim([-180,180])
        
        plt.title( title )
        plt.savefig(pp, format="pdf",dpi=150)
        pp.close()
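The histogram-to-energy step above implements DeltaG = -kB * T * (ln P(phi, psi) - ln Pmax), with kB in kcal/(mol K). A minimal sketch of just that conversion on synthetic angles; empty histogram bins come out as +inf:

import numpy as np

k_B = 0.0019872041        # kcal/(mol K)
T = 300                   # K

phi = np.random.uniform(-180, 180, 50000)
psi = np.random.uniform(-180, 180, 50000)

H, xedges, yedges = np.histogram2d(psi, phi, bins=100)
P = H / H.sum()                                    # empirical probability per bin
with np.errstate(divide='ignore'):                 # log(0) -> -inf for empty bins
    dG = -k_B * T * (np.log(P) - np.log(P.max()))
print(dG[np.isfinite(dG)].max())                   # highest finite barrier, in kcal/mol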
        
Example #13
 def _plot(self):
     colormap = plt.pcolor(self.x, self.y, self.z, cmap=self.cmap)
     cb = plt.colorbar(colormap)
     cb.set_label('value')
Example #14
    def show_plots(self):
        fig1 = plt.figure(figsize=[20, 20])
        ax1 = fig1.add_subplot(111)
        ax1.imshow(self.imdata)
        ax1.xaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))
        ax1.yaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))
        fig1.savefig(
            os.path.join(self.subdir,
                         "figure" + str(9999999999999999999999) + ".tif"))
        fig2 = plt.figure(figsize=[10, 10])
        ax2 = fig2.add_subplot(111)
        ax2.imshow(self.im_out)
        ax2.xaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))
        ax2.yaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))

        fig3 = plt.figure(figsize=[10, 10])
        ax3 = fig3.add_subplot(111)
        ax3.xaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormattera))
        ax3.set_xlabel("Emitters density $(1/ \mu m ^2)$")
        if showlog:
            ax3.set_xscale('log')
        ax3.set_ylim([0.0, 1.0])
        ax3a = ax3.twinx()
        if self.runs == 1:
            ax3.plot(self.em_nr,
                     self.corr[0],
                     ls="-",
                     color="green",
                     marker="None",
                     label="correlation")
            ax3.plot(self.em_nr,
                     self.binary[0],
                     ls="-",
                     color="red",
                     marker="None",
                     label="binary agreement")
            ax3a.plot(self.em_nr,
                      self.l2_norms[0],
                      ls="-",
                      color="blue",
                      marker="None",
                      label="L2 norm")
            self.data = numpy.array([
                numpy.multiply(1.0e-12 / self.area, self.em_nr), self.corr[0],
                self.binary[0], self.l2_norms[0]
            ]).transpose()
        else:
            ax3.plot(self.em_nr,
                     numpy.mean(self.corr, axis=0),
                     ls="-",
                     color="green",
                     marker="None",
                     label="correlation")
            ax3.plot(self.em_nr,
                     numpy.mean(self.binary, axis=0),
                     ls="-",
                     color="red",
                     marker="None",
                     label="binary agreement")
            ax3a.plot(self.em_nr,
                      numpy.mean(self.l2_norms, axis=0),
                      ls="-",
                      color="blue",
                      marker="None",
                      label="L2 norm")
            ax3.errorbar(self.em_nr,
                         numpy.mean(self.corr, axis=0),
                         numpy.std(self.corr, axis=0),
                         color="green")
            ax3.errorbar(self.em_nr,
                         numpy.mean(self.binary, axis=0),
                         numpy.std(self.binary, axis=0),
                         color="red")
            ax3a.errorbar(self.em_nr,
                          numpy.mean(self.l2_norms, axis=0),
                          numpy.std(self.l2_norms, axis=0),
                          color="blue")
            self.data = numpy.array([
                numpy.multiply(1.0e-12 / self.area, self.em_nr),
                numpy.mean(self.corr, axis=0),
                numpy.std(self.corr, axis=0),
                numpy.mean(self.binary, axis=0),
                numpy.std(self.binary, axis=0),
                numpy.mean(self.l2_norms, axis=0),
                numpy.std(self.l2_norms, axis=0)
            ]).transpose()
        ax3.grid("on")
        ax3.legend(loc="upper left")
        ax3a.legend()
        fig5 = plt.figure(figsize=[10, 10])
        ax5 = fig5.add_subplot(111)
        ax5.imshow(self.diff[1], cmap=self.cmapr, alpha=0.8)
        ax5.imshow(self.diff[0], cmap=self.cmapb, alpha=0.6)
        ax5.xaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))
        ax5.yaxis.set_major_formatter(
            mpl.ticker.FuncFormatter(self.mjrFormatter))
        fig3.savefig(os.path.join(self.subdir, "qmeasures.tif"))

        if runs_nr == 1:
            fig6 = plt.figure(figsize=[12, 14])
            ax6 = fig6.add_subplot(111)
            self.ft_data = numpy.array(self.ft_data)
            self.ftfactor = self.maxfreq / (self.ft_data.shape[1])
            cax6 = ax6.imshow(self.ft_data.transpose(), vmin=0.0, vmax=1.0)
            ax6.set_xticks(
                numpy.arange(
                    -0.5,
                    self.ft_data.shape[0] + 0.5 * self.ft_data.shape[0] / 10,
                    self.ft_data.shape[0] / 10))
            ax6.xaxis.set_major_formatter(
                mpl.ticker.FuncFormatter(self.mjrFormatterb))
            ax6.yaxis.set_major_formatter(
                mpl.ticker.FuncFormatter(self.mjrFormatterc))
            ax6.set_xlabel("Emitters density $[1000/ \mu m ^2]$")
            ax6.set_ylabel("Spatial frequency $[1/m]$")
            ax6ylims = ax6.get_ylim()
            #print ax6.get_xlim()
            ax6.set_ylim([
                ax6ylims[0] - 0.65 * (ax6ylims[0] - ax6ylims[1]), ax6ylims[1]
            ])
            fig6.autofmt_xdate(rotation=90)
            fig6.savefig(os.path.join(self.subdir, "transfunc.tif"))
            plt.colorbar(cax6)
            transfunc_output = os.path.join(self.subdir, "transfunc.txt")
            numpy.savetxt(transfunc_output,
                          self.ft_data.transpose(),
                          delimiter=",")

        #fig4 = plt.figure(figsize=[10,10])
        #ax4a = fig4.add_subplot(211)
        #ax4b = fig4.add_subplot(212)
        #ax4a.hist(self.imdata.flatten(),bins=10)
        #ax4b.hist(self.im_out.flatten(),bins=10)

        #ax5a.imshow(diff,cmap='jet')
        self.save_results()
        plt.show()
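Example #14 overlays two y-scales on one plot with twinx (ax3a = ax3.twinx()). A minimal sketch of that dual-axis pattern with made-up curves:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.plot(x, np.tanh(x), color="green", label="correlation")
ax.set_ylim([0.0, 1.0])
axr = ax.twinx()                          # second y-axis sharing the same x-axis
axr.plot(x, np.exp(-x), color="blue", label="L2 norm")
ax.legend(loc="upper left")
axr.legend(loc="upper right")
plt.show()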
Example #15
def example(tess='I',base=[2,2,2],nLevels=1,
            zero_v_across_bdry=[True]*3,
            vol_preserve=False,
           nRows=100, nCols=100,nSlices=100,
           use_mayavi=False,
           eval_v=False,
           eval_cell_idx=False):  
     
    tw = TransformWrapper(nRows=nRows,
                          nCols=nCols,
                          nSlices=nSlices,
                          nLevels=nLevels,  
                          base=base,
                          zero_v_across_bdry=zero_v_across_bdry,
                          tess=tess,
                          valid_outside=False,
                          only_local=False,
                          vol_preserve=vol_preserve)
     
     
    print_iterable(tw.ms.L_cpa_space)
    print tw
    
    # create some fake 3D image.
    img = np.zeros((nCols,nRows,nSlices),dtype=np.float64)
    
#    img[:]=np.random.random_integers(0,255,img.shape)
    
    # Fill the image with the x coordinates as fake values
    img[:]=tw.pts_src_dense.cpu[:,0].reshape(img.shape)
    
    img0 = CpuGpuArray(img.copy().astype(np.float64))
    img_wrapped_fwd= CpuGpuArray.zeros_like(img0)
    img_wrapped_inv= CpuGpuArray.zeros_like(img0)
    
     
    seed=0
    np.random.seed(seed)    
    
                  
    ms_Avees=tw.get_zeros_PA_all_levels()
    ms_theta=tw.get_zeros_theta_all_levels() 
    
    
    if tess == 'II' :        
        for level in range(tw.ms.nLevels): 
            cpa_space = tw.ms.L_cpa_space[level]  
            Avees = ms_Avees[level]    
#            1/0
            if level==0:
                tw.sample_gaussian(level,ms_Avees[level],ms_theta[level],mu=None)# zero mean
#                ms_theta[level].fill(0)
#                ms_theta[level][-4]=10
                cpa_space.theta2Avees(theta=ms_theta[level],Avees=Avees)
            else:
                tw.sample_from_the_ms_prior_coarse2fine_one_level(ms_Avees,ms_theta,
                                                                    level_fine=level)
    else:
        # For tess='I' in 3D, I have yet to implement the coarse-to-fine sampling.
        for level in range(tw.ms.nLevels): 
            cpa_space = tw.ms.L_cpa_space[level]
            velTess = cpa_space.zeros_velTess()
            ms_Avees[level].fill(0)
            Avees = ms_Avees[level]
            tw.sample_gaussian_velTess(level,Avees,velTess,mu=None)
    
       
    
    
    print 'img shape:',img0.shape
   
   
    # You don't have to use these. You can use any 2d array
    # that has 3 columns (regardless of the number of rows).   
    pts_src = tw.pts_src_dense       
    pts_src=CpuGpuArray(pts_src.cpu[::1].copy())
	
    # Create a buffer for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src) 
    pts_inv = CpuGpuArray.zeros_like(pts_src)  
   
   
    for level in range(tw.ms.nLevels):              
        tw.update_pat_from_Avees(ms_Avees[level],level) 
        
         
        if eval_v:
            # Evaluating the velocity field. 
            # You don't have to do it unless you want to visualize v.
            # (when evaluating the transformation, v will be internally 
            # evaluated anyway -- but its result won't be stored)
            tw.calc_v(level=level) 
        
        
        print 'level',level
        print
        print 'number of points:',len(pts_src)   
        print 'number of cells:',tw.ms.L_cpa_space[level].nC    
        
        
        
        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()           
        
        # Simply calling 
        #   tic = time.clock()
        # and then 
        #   tic = time.clock()
        # won't work.
        # In fact, most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that, 
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.
        
        
        tic = time.clock() 
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src,pts_fwd,level=level)
        timer_gpu_T_fwd.toc()   
        toc = time.clock()
        

        print 'Time, in sec, for computing T_fwd:'           
        print timer_gpu_T_fwd.secs
        print toc-tic  # likely to be 0, unless you also used the GpuTimer.
        
        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src,pts_inv,level=level)   
 
        
       
        if eval_cell_idx:   
            # cell_idx is computed here just for display. 
            cell_idx = CpuGpuArray.zeros(len(pts_src),dtype=np.int32)
            tw.calc_cell_idx(pts_src,cell_idx,level)
    
        tw.remap_fwd(pts_inv,img0,img_wrapped_fwd)
        tw.remap_inv(pts_fwd,img0,img_wrapped_inv)
        
         
    
        # For display purposes, do gpu2cpu transfer
        print "For display purposes, do gpu2cpu transfer"

        if eval_cell_idx:
            cell_idx.gpu2cpu()
        if eval_v:
            tw.v_dense.gpu2cpu() 
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_inv.gpu2cpu()
        
         
    
       
       
    
    
        if use_mayavi:
            ds=1 # downsampling factor
            i= 17
            pts_src_grid = pts_src.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_src_ds=pts_src_grid[::ds,::ds,i].reshape(-1,3)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_fwd_ds=pts_fwd_grid[::ds,::ds,i].reshape(-1,3)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows,tw.nCols,-1,3)
            pts_inv_ds=pts_inv_grid[::ds,::ds,i].reshape(-1,3)
        
        
            from of.my_mayavi import *
            mayavi_mlab_close_all()
            mayavi_mlab_figure_bgwhite('src')
            x,y,z=pts_src_ds.T
            mayavi_mlab_plot3d(x,y,z)
            mayavi_mlab_figure_bgwhite('fwd')
            x,y,z=pts_fwd_ds.T
            mayavi_mlab_plot3d(x,y,z)    
         
        figsize = (12,12)
        plt.figure(figsize=figsize)               
        i= 17 # some slice
        plt.subplot(131)
        plt.imshow(img0.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")  
        plt.title('slice from img')
        plt.subplot(132)
        plt.imshow(img_wrapped_fwd.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")  
        plt.axis('off') 
        plt.title('slice from fwd(img)')
        plt.subplot(133)
        plt.imshow(img_wrapped_inv.cpu[:,:,i].astype(np.uint8),interpolation="Nearest")    
        plt.axis('off') 
        plt.title('slice from inv(img)')
        
    
    if 0: # debug    
        
        cpa_space=tw.ms.L_cpa_space[level]
        if eval_v:
            vx=tw.v_dense.cpu[:,0].reshape(cpa_space.x_dense_grid_img.shape[1:])
            vy=tw.v_dense.cpu[:,1].reshape(cpa_space.x_dense_grid_img.shape[1:])
            vz=tw.v_dense.cpu[:,2].reshape(cpa_space.x_dense_grid_img.shape[1:])
        
        
            plt.figure()
            plt.imshow(vz[:,:,17],interpolation="Nearest");plt.colorbar()
            plt.title('vz in some slice')
     
    return tw
Example #16
disp0 = np.asarray([ii[1] for ii in tp]).flatten()
us0 = disp0[2::3]

ep = vj.EpochalDisplacement('cumu_post_with_seafloor.h5',
                            filter_sites=sites)
disp1 = ep[0]
us1 = disp1[2::3]

plt.subplot(121)
bm = vj.MyBasemap(region_code='near')
mplt = vj.MapPlotDisplacement(basemap=bm)
mplt.plot_scalor(us0, sites, cmap='RdBu')
mplt = vj.MapPlotSlab(basemap=bm)
mplt.plot_top()
plt.clim([-1., 1.])

plt.subplot(122)
bm = vj.MyBasemap(region_code='near')
mplt = vj.MapPlotDisplacement(basemap=bm)
im = mplt.plot_scalor(us1, sites, cmap='RdBu')
mplt = vj.MapPlotSlab(basemap=bm)
mplt.plot_top()
plt.clim([-1., 1.])

divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)

plt.savefig('compare.pdf')
plt.show()
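The divider/append_axes idiom above attaches a colorbar axes of matching height right next to the parent axes. A generic, self-contained sketch of that pattern (random image, no basemap):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

fig, ax = plt.subplots()
im = ax.imshow(np.random.randn(50, 50), cmap='RdBu')
im.set_clim(-1., 1.)

divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)   # dedicated colorbar axes
fig.colorbar(im, cax=cax)
plt.show()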
Example #17
def train(n_epochs, _batch_size, start_epoch=0):
    """
        train with fixed batch_size for given epochs
        make some example plots and save model after each epoch
    """
    global batch_size
    batch_size = _batch_size
    # create a dataqueue with the keras facilities. this allows
    # to prepare the data in parallel to the training
    sample_dataqueue = GeneratorEnqueuer(generate_real_samples(batch_size),
                                         use_multiprocessing=True)
    sample_dataqueue.start(workers=2, max_queue_size=10)
    sample_gen = sample_dataqueue.get()

    # queue providing latent points for the generator
    gan_sample_dataqueue = GeneratorEnqueuer(
        generate_latent_points_as_generator(batch_size),
        use_multiprocessing=True)
    gan_sample_dataqueue.start(workers=2, max_queue_size=10)
    gan_sample_gen = gan_sample_dataqueue.get()

    # targets for loss function
    valid = -np.ones((batch_size, 1))
    fake = np.ones((batch_size, 1))
    dummy = np.zeros((batch_size, 1))  # Dummy gt for gradient penalty

    bat_per_epo = int(n_samples / batch_size)

    # we need to call the discriminator once in order
    # to initialize the input shapes
    [X_real, cond_real] = next(sample_gen)
    latent = np.random.normal(size=(batch_size, latent_dim))
    critic_model.predict([X_real, cond_real, latent])
    for i in trange(n_epochs):
        epoch = 1 + i + start_epoch
        # enumerate batches over the training set
        for j in trange(bat_per_epo):

            for _ in range(n_disc):
                # fetch a batch from the queue
                [X_real, cond_real] = next(sample_gen)
                latent = np.random.normal(size=(batch_size, latent_dim))
                d_loss = critic_model.train_on_batch(
                    [X_real, cond_real, latent], [valid, fake, dummy])
                # we get four losses back here: average, valid, fake, and gradient_penalty
                # we want the average of valid and fake
                d_loss = np.mean([d_loss[1], d_loss[2]])

            # train generator
            # prepare points in latent space as input for the generator
            [latent, cond] = next(gan_sample_gen)
            # update the generator via the discriminator's error
            g_loss = generator_model.train_on_batch([latent, cond], valid)
            # summarize loss on this batch
            print(f'{epoch}, {j + 1}/{bat_per_epo}, d_loss {d_loss}' + \
                  f' g:{g_loss} ')  # , d_fake:{d_loss_fake} d_real:{d_loss_real}')

            if np.isnan(g_loss) or np.isnan(d_loss):
                raise ValueError('encountered nan in g_loss and/or d_loss')

            hist['d_loss'].append(d_loss)
            hist['g_loss'].append(g_loss)

        # plot generated examples
        plt.figure(figsize=(25, 25))
        n_plot = 30
        X_fake, cond_fake = generate_fake_samples(n_plot)
        for iplot in range(n_plot):
            plt.subplot(n_plot, 25, iplot * 25 + 1)
            plt.imshow(cond_fake[iplot, :, :].squeeze(),
                       cmap=plt.cm.gist_earth_r,
                       norm=LogNorm(vmin=0.01, vmax=1))
            plt.axis('off')
            for jplot in range(1, 24):
                plt.subplot(n_plot, 25, iplot * 25 + jplot + 1)
                plt.imshow(X_fake[iplot, jplot, :, :].squeeze(),
                           vmin=0,
                           vmax=1,
                           cmap=plt.cm.hot_r)
                plt.axis('off')
        plt.colorbar()
        plt.suptitle(f'epoch {epoch:04d}')
        plt.savefig(
            f'{plotdir}/fake_samples_{params}_{epoch:04d}_{j:06d}.{plot_format}'
        )

        # plot loss
        plt.figure()
        plt.plot(hist['d_loss'], label='d_loss')
        plt.plot(hist['g_loss'], label='g_loss')
        plt.ylabel('batch')
        plt.legend()
        plt.savefig(f'{plotdir}/training_loss_{params}.{plot_format}')
        pd.DataFrame(hist).to_csv('hist.csv')
        plt.close('all')

        generator.save(f'{outdir}/gen_{params}_{epoch:04d}.h5')
        critic.save(f'{outdir}/disc_{params}_{epoch:04d}.h5')
Example #18
File: plotting.py Project: 0x0all/rep
 def _plot(self):
     colormap = plt.pcolor(self.x, self.y, self.z, cmap=self.cmap)
     cb = plt.colorbar(colormap)
     cb.set_label('value')
Example #19
    def plotPCA( 
        self, 
        plotTitleIdentifier, 
        dataDir, 
        eigenVectorFile , 
        eigenValueFile = False , 
        eigenVectorCount = 4, 
        plotDistibution = True, 
        limits = False 
    ):
    
        # If this is the first plot, create grid
        if self.subPlots == 0:
            self.createPdfPage()
    
        # User info
        print "Doing principal component analysis"
    
        # Do the four principal vectors
        frames = []

        # Principal components
        pcs = []
        for i in range( 0, eigenVectorCount ):
            pcs.append([])
    
        # Go through analysis file and get eigenvalues
        if eigenValueFile != False:
            eigenValues = []
            eigenValueTotal = 0
            with open(dataDir+eigenValueFile,"r") as f:
                for line in f:
                    temp = line.split()
                    eigenValueTotal += float(temp[1])
                    eigenValues.append(float(temp[1]))
            eigenValues = (np.array(eigenValues) / eigenValueTotal) * 100               
    
        # Get the file with all the projection data
        pcaFile = open(dataDir+eigenVectorFile,"r")
        n = 0
        for aline in pcaFile:
            if n > 1 and aline:
                values = aline.split()
                
                # Add frames                
                frames.append( int(values[0]) )
                
                # If requesting more vectors than present
                if eigenVectorCount > len(values):
                    raise Exception("CPPTRAJ has not projected "+str(eigenVectorCount)+" vectors")
                
                # Add to vectors
                for i in range( 0, eigenVectorCount ):
                    pcs[i].append( float(values[i+1]) )
            n = n + 1
    
        # Create numpy arrays
        frames = np.array( frames )
        self.np_arrays = [ np.array( pc ) for pc in pcs ]
        
        # Set the plotting font and default size 'family' : 'Arial',
        font = {
                'weight' : 'normal',
                'size'   : 10}
                
        # Do a plot for each PCA
        for component in range( 1, len(self.np_arrays) ):
            
            # User Info        
            print "Plotting component 1 vs. "+str(component)
        
            # Normalize the data DeltaG = -kb T * [ ln( P(v1,v2) ) - ln Pmax ]
            boltzman = 0.0019872041
            temperature = 300
            
            # Plot both distribution & Energy Landscape
            for plotType in [ "energy", "distribution" ] if plotDistibution else [ "energy" ]:        
            
                # New subplot
                ax = self.getActiveAx()
                
                # Increase subplot counter
                self.subPlots += 1
    
                # Do the plotting
                if plotType == "energy":
                    
                    # Create the histogram without plotting, so we can set the units properly        
                    H, xedges, yedges = np.histogram2d(self.np_arrays[component], self.np_arrays[0], bins=100 )
                    H_normalized = H/len(self.np_arrays[0])
                    H = -1 * boltzman * temperature * (np.log( H_normalized )-np.log(np.max(H_normalized)))
                    
                    # Set max energy
                    for vec in H:
                        for val in vec:
                            if not np.isinf(val) and val > self.latestMax:
                                self.latestMax = val                   
                    
                    # Now plot the 2d histogram
                    img = ax.imshow(H,  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
                    
                    # create an axes on the right side of ax. The width of cax will be 5%
                    # of ax and the padding between cax and ax will be fixed at 0.05 inch.
                    divider = make_axes_locatable(ax)
                    cax = divider.append_axes("right", size="5%", pad=0.05)                    
                    
                    # Create colorbar
                    colorbar = plt.colorbar(img, ax=ax, cax = cax)
                    colorbar.set_label("Kcal / mol")
                    self.colorBars.append(colorbar)
                    
                elif plotType == "distribution":
            
                    # Directly do the 2d histogram of matplotlib        
                    _, _, _, img = ax.hist2d(self.np_arrays[0], self.np_arrays[component], bins=100 , rasterized=True, norm=LogNorm() )
                    colorbar = plt.colorbar(img, ax=ax)
                    colorbar.set_label("Occurrences")
                    self.colorBars.append(colorbar)
            
                # Set limits if they are not specified
                if limits == False:
                    print "Calculating plot limits based on data"
                    mini = np.abs(np.min( [np.min(self.np_arrays[0]), np.min(self.np_arrays[component])] ))  
                    maxi = np.abs(np.max( [np.max(self.np_arrays[0]), np.max(self.np_arrays[component])] ))
                    limits = int(math.ceil(np.max( [mini,maxi] )))
                   
                print "Setting plot limits to: ",limits
                ax.set_ylim([-limits,limits])
                ax.set_xlim([-limits,limits]) 
                
                # Save the limits for the component
                with open(dataDir+"pca_limits_"+str(component), "w") as fo:
                    fo.write( str(limits) )
            
                # Set title, labels etc
                plt.legend()
                if eigenValueFile != False:
                    ax.set_xlabel("PC1 ({0:.2f}%)".format(eigenValues[0]), fontsize=12)
                    ax.set_ylabel("PC"+str(component+1)+" ({0:.2f}%)".format(eigenValues[component]), fontsize=12)
                else:
                    ax.set_xlabel("PC1", fontsize=12)
                    ax.set_ylabel("PC"+str(component+1), fontsize=12)
                
                ax.set_title( "PCA. "+plotTitleIdentifier )
                plt.rc('font', **font)   
        
                # Save pdf page if it's filled
                if self.subPlots >= (self.rows*self.columns):
                    print "Now saving to PDF. Number of plots: ",self.subPlots
                    self.savePdfPage()
                    self.subPlots = 0
Example #20
signals = extract_activity_signals(activity, resample='existing')
power = signals['power']
balance = signals['left_right_balance']
time = signals['time']
PAvgBalance = sum(power * balance) / sum(power)

# get session info
records = activity.get_records_by_type('session')
for record in records:
    valid_field_names = record.get_valid_field_names()

# plotting
CrossPlotFig = plt.figure()
sc = plt.scatter(power,
                 balance,
                 s=5,
                 c=time,
                 cmap=plt.get_cmap('brg'),
                 edgecolors='face')
plt.colorbar(orientation='horizontal')
plt.title('Balance Vs Power over Time (sec)\n' \
            + 'power-weighted average = %4.1f' % (PAvgBalance) )
plt.xlabel('Power (w)')
plt.ylabel('Right Balance (%)')
plt.grid(b=True, which='major', axis='both')
ax = plt.gca()
grids = arange(10, 100, 10)  # force a grid at 50
ax.set_yticks(grids, minor=False)
ax.grid(True)
plt.show()
Example #21
def link_level_hour(levels, usages, quantiles, scheme, direction, color, nnames, lnames, admat=None):
    """
    Make a color mesh of a node's average hourly usage of links at different
    levels.
    """
    if admat is None:
        admat = np.genfromtxt('./settings/eadmat.txt')
    if color == 'solar':
        cmap = Oranges_cmap
    elif color == 'wind':
        cmap = Blues_cmap
    elif color == 'backup':
        cmap = 'Greys'
    links, nodes, lapse = usages.shape
    usages = np.reshape(usages, (links, nodes, lapse // 24, 24))
    totalHour = np.zeros((levels, 24))
    totalNormed = np.zeros((levels, 24))
    for node in range(nodes):
        nl = neighbor_levels(node, levels, admat)
        hourSums = np.zeros((levels, 24))
        for lvl in range(levels):
            ll = link_level(nl, lvl, nnames, lnames)
            ll = np.array(ll, dtype='int')
            meanSum = np.sum(np.mean(usages[ll, node], axis=1), axis=0)
            linkSum = sum(quantiles[ll])
            hourSums[lvl] = meanSum / linkSum
        totalHour += hourSums

        plt.figure(figsize=(9, 3))
        ax = plt.subplot()
        plt.pcolormesh(hourSums, cmap=cmap)
        plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
        ax.set_yticks(np.linspace(.5, levels - .5, levels))
        ax.set_yticklabels(range(1, levels + 1))
        ax.yaxis.set_tick_params(width=0)
        ax.xaxis.set_tick_params(width=0)
        ax.set_xticks(np.linspace(.5, 23.5, 24))
        ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
        plt.ylabel('Link level')
        plt.axis([0, 24, 0, levels])
        plt.title(nnames[node] + ' ' + direction + ' ' + color)
        plt.savefig(figPath + '/hourly/' + str(scheme) + '/' + str(node) + '_' + color + '_' + direction + '.pdf', bbox_inches='tight')
        plt.close()

        hourSums = hourSums / np.sum(hourSums, axis=1)[:, None]
        totalNormed += hourSums
        plt.figure(figsize=(9, 3))
        ax = plt.subplot()
        plt.pcolormesh(hourSums, cmap=cmap)
        plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
        ax.set_yticks(np.linspace(.5, levels - .5, levels))
        ax.set_yticklabels(range(1, levels + 1))
        ax.yaxis.set_tick_params(width=0)
        ax.xaxis.set_tick_params(width=0)
        ax.set_xticks(np.linspace(.5, 23.5, 24))
        ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
        plt.ylabel('Link level')
        plt.axis([0, 24, 0, levels])
        plt.title(nnames[node] + ' ' + direction + ' ' + color)
        plt.savefig(figPath + '/hourly/' + str(scheme) + '/normed/' + str(node) + '_' + color + '_' + direction + '.pdf', bbox_inches='tight')
        plt.close()

    # Plot average hourly usage
    totalHour /= nodes
    plt.figure(figsize=(9, 3))
    ax = plt.subplot()
    plt.pcolormesh(totalHour, cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(.5, 23.5, 24))
    ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.axis([0, 24, 0, levels])
    plt.savefig(figPath + '/hourly/' + str(scheme) + '/total_' + color + '_' + direction + '.pdf', bbox_inches='tight')
    plt.close()

    totalNormed /= nodes
    plt.figure(figsize=(9, 3))
    ax = plt.subplot()
    plt.pcolormesh(totalNormed, cmap=cmap)
    plt.colorbar().set_label(label=r'$U_n^{(l)}$', size=11)
    ax.set_yticks(np.linspace(.5, levels - .5, levels))
    ax.set_yticklabels(range(1, levels + 1))
    ax.yaxis.set_tick_params(width=0)
    ax.xaxis.set_tick_params(width=0)
    ax.set_xticks(np.linspace(.5, 23.5, 24))
    ax.set_xticklabels(np.array(np.linspace(1, 24, 24), dtype='int'), ha="center", va="top", fontsize=10)
    plt.ylabel('Link level')
    plt.axis([0, 24, 0, levels])
    plt.savefig(figPath + '/hourly/' + str(scheme) + '/normed/total_' + color + '_' + direction + '.pdf', bbox_inches='tight')
    plt.close()
Example #22
mpl_dates[:10]


# In[20]:


plt.figure(figsize=(8, 4))
plt.scatter(dax['PCA_5'], dax['^GDAXI'], c=mpl_dates)
lin_reg = np.polyval(np.polyfit(dax['PCA_5'],
                                dax['^GDAXI'], 1),
                                dax['PCA_5'])
plt.plot(dax['PCA_5'], lin_reg, 'r', lw=3)
plt.grid(True)
plt.xlabel('PCA_5')
plt.ylabel('^GDAXI')
plt.colorbar(ticks=mpl.dates.DayLocator(interval=250),
                format=mpl.dates.DateFormatter('%d %b %y'))
# tag: pca_3
# title: DAX return values against PCA return values with linear regression


# In[21]:


cut_date = '2017-3-1'
early_pca = dax[dax.index < cut_date]['PCA_5']
early_reg = np.polyval(np.polyfit(early_pca,
                dax['^GDAXI'][dax.index < cut_date], 1),
                early_pca)


# In[22]:
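The regression overlay above is the usual np.polyfit / np.polyval pairing on top of a colour-coded scatter. A tiny standalone sketch with synthetic series, where an integer index stands in for the dates:

import numpy as np
import matplotlib.pyplot as plt

x = np.random.randn(250)
y = 0.8 * x + 0.1 * np.random.randn(250)
t = np.arange(250)                     # stand-in for mpl_dates

plt.figure(figsize=(8, 4))
plt.scatter(x, y, c=t)
plt.colorbar(label='observation index')
coeffs = np.polyfit(x, y, 1)           # degree-1 (linear) fit
plt.plot(np.sort(x), np.polyval(coeffs, np.sort(x)), 'r', lw=3)
plt.grid(True)
plt.show()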
Example #23
ax1.legend(legend, loc='upper left')
ax1.set_title('5Y5Y X-Market')

# Correlation Plot
#ax2 = sns.heatmap(df_5y5y.corr(), xticklabels=legend, yticklabels=legend, cmap='RdYlGn', center=0, annot=True)
spread = df['PLN 5Y X 5Y Fwd Swap Rate'] - avg_5y5y
ax2.plot(spread)

# Decorations
#ax2.title('Historical Correlations', fontsize=22)
#ax2.xticks(fontsize=8)
#ax2.yticks(fontsize=4)

print(df_5y5y.corr())

x1 = avg_5y5y
y1 = df_5y5y['PLN 5Y X 5Y Fwd Swap Rate']

ax4 = sns.regplot(x=x1, y=y1, marker="+")
#plt.show()

colors = np.linspace(0.1, 1, len(df_5y5y))
mymap = plt.get_cmap("winter")
ax3 = ax3.scatter(x1, y1, c=colors, cmap=mymap, lw=0)
cb = plt.colorbar(ax3)
#plt.subplot(x1[-1],y1[-1],'ro')
cb.ax.set_yticklabels(
    [str(p.date()) for p in df_5y5y[::len(df_5y5y) // 10].index])
fig.tight_layout()
plt.show()
Example #24
def plot_variable(u, name, direc, cmap=cmaps.parula, scale='lin', numLvls=100,
                  umin=None, umax=None, \
                  tp=False, \
                  tpAlpha=1.0, show=False,
                  hide_ax_tick_labels=False, label_axes=True, title='',
                  use_colorbar=True, hide_axis=False, colorbar_loc='right'):
  """
    show -- whether to show the plot on the screen 
    tp -- show triangle
    cmap -- colors:
      gist_yarg - grey 
      gnuplot, hsv, gist_ncar
      jet - typical colors
  """
  mesh = u.function_space().mesh()
  v    = u.compute_vertex_values(mesh)
  x    = mesh.coordinates()[:,0]
  y    = mesh.coordinates()[:,1]
  t    = mesh.cells()
  

  if not os.path.isdir( direc ): 
      os.makedirs(direc)
 
  full_path = os.path.join(direc, name)

  if umin != None:
    vmin = umin
  else:
    vmin = v.min()
  if umax != None:
    vmax = umax
  else:
    vmax = v.max()

  # contour levels :
  if scale == 'log':
    v[v < vmin] = vmin + 1e-12
    v[v > vmax] = vmax - 1e-12
    from matplotlib.ticker import LogFormatter
    levels      = np.logspace(np.log10(vmin), np.log10(vmax), numLvls)
    
    tick_numLvls = min( numLvls, 8 )
    tick_levels = np.logspace(np.log10(vmin), np.log10(vmax), tick_numLvls)
    
    formatter   = LogFormatter(10, labelOnlyBase=False)
    norm        = colors.LogNorm()

  elif scale == 'lin':
    v[v < vmin] = vmin + 1e-12
    v[v > vmax] = vmax - 1e-12
    from matplotlib.ticker import ScalarFormatter
    levels    = np.linspace(vmin, vmax, numLvls)
    
    tick_numLvls = min( numLvls, 8 )
    tick_levels = np.linspace(vmin, vmax, tick_numLvls)
    
    formatter = ScalarFormatter()
    norm      = None

  elif scale == 'bool':
    from matplotlib.ticker import ScalarFormatter
    levels    = [0, 1, 2]
    formatter = ScalarFormatter()
    norm      = None

  fig = plt.figure(figsize=(5,5))
  ax  = fig.add_subplot(111)

  c = ax.tricontourf(x, y, t, v, levels=levels, norm=norm,
                     cmap=plt.get_cmap(cmap))
  plt.axis('equal')

  if tp == True:
    p = ax.triplot(x, y, t, '-', lw=0.2, alpha=tpAlpha)
  ax.set_xlim([x.min(), x.max()])
  ax.set_ylim([y.min(), y.max()])
  if label_axes:
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$y$')
  if hide_ax_tick_labels:
    ax.set_xticklabels([])
    ax.set_yticklabels([])
  if hide_axis:
    plt.axis('off')

  # include colorbar :
  if scale != 'bool' and use_colorbar:
    divider = make_axes_locatable(plt.gca())
    cax  = divider.append_axes(colorbar_loc, "5%", pad="3%")
    cbar = plt.colorbar(c, cax=cax, format=formatter,
                        ticks=tick_levels)
    tit = plt.title(title)

  if use_colorbar:
    plt.tight_layout(rect=[.03,.03,0.97,0.97])
  else:
    plt.tight_layout()
  plt.savefig( full_path + '.eps', dpi=300)
  if show:
    plt.show()
  plt.close(fig)
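A minimal sketch of the tricontourf-plus-colorbar core of plot_variable, on a random point cloud rather than a FEniCS mesh (matplotlib builds the triangulation internally; a stock colormap stands in for cmaps.parula):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

x = np.random.rand(500)
y = np.random.rand(500)
v = np.sin(2 * np.pi * x) * np.cos(2 * np.pi * y)

levels = np.linspace(v.min(), v.max(), 100)
fig, ax = plt.subplots(figsize=(5, 5))
c = ax.tricontourf(x, y, v, levels=levels, cmap='viridis')
ax.set_aspect('equal')

divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
fig.colorbar(c, cax=cax, ticks=np.linspace(v.min(), v.max(), 8))
plt.show()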
Example #25
File: visualize.py Project: isall/unmixing
    def plot_feature_space(self,
                           m=0,
                           n=1,
                           c=None,
                           r=300,
                           hold=False,
                           xlim=None,
                           ylim=None,
                           xtpl='MNF %d',
                           ytpl='MNF %d',
                           alpha=0.5,
                           stitle='MNF Feature Space: Axes %d and %d',
                           interact=False):
        '''
        Create a 2D projection of the feature space and display it.
        '''
        self.__dims__ = (m, n, c)  # Remember these indices

        # Create a new figure of size 9x9 points, using 72 dots per inch
        fig = figure(figsize=self.size, dpi=self.dpi)
        self.ax = fig.add_subplot(111)
        defaults = {
            'linewidths': (0, ),
            's': (30, ),
            'cmap': 'YlGnBu',
            'alpha': alpha
        }

        if self.__raveled__:
            if c is not None:
                self.ax.scatter(self.rfeatures[:, m],
                                self.rfeatures[:, n],
                                c=self.rfeatures[:, c],
                                **defaults)

            else:
                self.ax.scatter(self.rfeatures[:, m], self.rfeatures[:, n],
                                **defaults)

        else:
            i = j = r  # Select square subsets
            if c is not None:
                # Plot the data; if a third dimension in color is requested...
                self.ax.scatter(self.rfeatures[0:i, 0:j, m],
                                self.rfeatures[0:i, 0:j, n],
                                c=self.rfeatures[0:i, 0:j, c],
                                **defaults)

            else:
                self.ax.scatter(self.rfeatures[0:i, 0:j, m],
                                self.rfeatures[0:i, 0:j, n], **defaults)

        if c is not None:
            plt.colorbar(orientation='vertical')
            t = '2D Projection with Axis %d in Color' % (c + 1)

        else:
            t = '2D Projection'

        # Allow users to change the x- and y-axis limits
        axes = plt.gca()
        if xlim is not None:
            axes.set_xlim(xlim)

        if ylim is not None:
            axes.set_ylim(ylim)

        plt.xlabel(xtpl % (m + 1), fontsize=14)
        plt.ylabel(ytpl % (n + 1), fontsize=14)
        plt.suptitle(stitle % (m + 1, n + 1), fontsize=18, fontweight='bold')
        plt.title(t, fontsize=16)

        if not hold:
            plt.show()

        if not hold and interact:
            self.on_reset()
            self.ax.figure.canvas.mpl_connect('button_press_event',
                                              self.on_press)
            return self.ax
Example #26
 def plot_culture(self):
     # plt.figure()
     plt.matshow(self.get_matrix_of_agents_culture())
     plt.colorbar()
     plt.clim(0, 1)  # Sets the min/max limits of colorbar
     plt.title("Culture")
Example #27
n_plot = 30
[X_real, cond_real] = next(generate_real_samples(n_plot))
for i in range(n_plot):
    plt.subplot(n_plot, 25, i * 25 + 1)
    plt.imshow(cond_real[i, :, :].squeeze(),
               cmap=plt.cm.gist_earth_r,
               norm=LogNorm(vmin=0.01, vmax=1))
    plt.axis('off')
    for j in range(1, 24):
        plt.subplot(n_plot, 25, i * 25 + j + 1)
        plt.imshow(X_real[i, j, :, :].squeeze(),
                   vmin=0,
                   vmax=1,
                   cmap=plt.cm.hot_r)
        plt.axis('off')
plt.colorbar()
plt.savefig(f'{plotdir}/real_samples.{plot_format}')

hist = {'d_loss': [], 'g_loss': []}
print(f'start training on {n_samples} samples')


def train(n_epochs, _batch_size, start_epoch=0):
    """
        train with fixed batch_size for given epochs
        make some example plots and save model after each epoch
    """
    global batch_size
    batch_size = _batch_size
    # create a dataqueue with the keras facilities. this allows
    # to prepare the data in parallel to the training
Example #28
def example(tess='I',
            base=[2, 2, 2],
            nLevels=1,
            zero_v_across_bdry=[True] * 3,
            vol_preserve=False,
            nRows=100,
            nCols=100,
            nSlices=100,
            use_mayavi=False,
            eval_v=False,
            eval_cell_idx=False):

    tw = TransformWrapper(nRows=nRows,
                          nCols=nCols,
                          nSlices=nSlices,
                          nLevels=nLevels,
                          base=base,
                          zero_v_across_bdry=zero_v_across_bdry,
                          tess=tess,
                          valid_outside=False,
                          only_local=False,
                          vol_preserve=vol_preserve)

    print_iterable(tw.ms.L_cpa_space)
    print tw

    # create some fake 3D image.
    img = np.zeros((nCols, nRows, nSlices), dtype=np.float64)

    #    img[:]=np.random.random_integers(0,255,img.shape)

    # Fill the image with the x coordinates as fake values
    img[:] = tw.pts_src_dense.cpu[:, 0].reshape(img.shape)

    img0 = CpuGpuArray(img.copy().astype(np.float64))
    img_wrapped_fwd = CpuGpuArray.zeros_like(img0)
    img_wrapped_inv = CpuGpuArray.zeros_like(img0)

    seed = 0
    np.random.seed(seed)

    ms_Avees = tw.get_zeros_PA_all_levels()
    ms_theta = tw.get_zeros_theta_all_levels()

    if tess == 'II':
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            Avees = ms_Avees[level]
            #            1/0
            if level == 0:
                tw.sample_gaussian(level,
                                   ms_Avees[level],
                                   ms_theta[level],
                                   mu=None)  # zero mean
                #                ms_theta[level].fill(0)
                #                ms_theta[level][-4]=10
                cpa_space.theta2Avees(theta=ms_theta[level], Avees=Avees)
            else:
                tw.sample_from_the_ms_prior_coarse2fine_one_level(
                    ms_Avees, ms_theta, level_fine=level)
    else:
        # For tess='I' in 3D, I have yet to implement the coarse-to-fine sampling.
        for level in range(tw.ms.nLevels):
            cpa_space = tw.ms.L_cpa_space[level]
            velTess = cpa_space.zeros_velTess()
            ms_Avees[level].fill(0)
            Avees = ms_Avees[level]
            tw.sample_gaussian_velTess(level, Avees, velTess, mu=None)

    print 'img shape:', img0.shape

    # You don't have to use these. You can use any 2d array
    # that has 3 columns (regardless of the number of rows).
    pts_src = tw.pts_src_dense
    pts_src = CpuGpuArray(pts_src.cpu[::1].copy())

    # Create a buffer for the output
    pts_fwd = CpuGpuArray.zeros_like(pts_src)
    pts_inv = CpuGpuArray.zeros_like(pts_src)

    for level in range(tw.ms.nLevels):
        tw.update_pat_from_Avees(ms_Avees[level], level)

        if eval_v:
            # Evaluating the velocity field.
            # You don't have to do it unless you want to visualize v.
            # (when evaluating the transformation, v will be internally
            # evaluated anyway -- but its result won't be stored)
            tw.calc_v(level=level)

        print 'level', level
        print
        print 'number of points:', len(pts_src)
        print 'number of cells:', tw.ms.L_cpa_space[level].nC

        # optional, if you want to time it
        timer_gpu_T_fwd = GpuTimer()

        # Simply calling
        #   tic = time.clock()
        # and then
        #   tic = time.clock()
        # won't work.
        # In fact, most likely you will get that toc-tic is zero.
        # You need to use the GpuTimer object. When you do that,
        # one side effect is that suddenly the toc-tic from above will
        # give you a more realistic result.

        tic = time.clock()
        timer_gpu_T_fwd.tic()
        tw.calc_T_fwd(pts_src, pts_fwd, level=level)
        timer_gpu_T_fwd.toc()
        toc = time.clock()

        print 'Time, in sec, for computing T_fwd:'
        print timer_gpu_T_fwd.secs
        print toc - tic  # likely to be 0, unless you also used the GpuTimer.

        # You can also time the inv of course. Results will be similar.
        tw.calc_T_inv(pts_src, pts_inv, level=level)

        if eval_cell_idx:
            # cell_idx is computed here just for display.
            cell_idx = CpuGpuArray.zeros(len(pts_src), dtype=np.int32)
            tw.calc_cell_idx(pts_src, cell_idx, level)

        tw.remap_fwd(pts_inv, img0, img_wrapped_fwd)
        tw.remap_inv(pts_fwd, img0, img_wrapped_inv)

        # For display purposes, do gpu2cpu transfer
        print "For display purposes, do gpu2cpu transfer"

        if eval_cell_idx:
            cell_idx.gpu2cpu()
        if eval_v:
            tw.v_dense.gpu2cpu()
        pts_fwd.gpu2cpu()
        pts_inv.gpu2cpu()
        img_wrapped_fwd.gpu2cpu()
        img_wrapped_inv.gpu2cpu()

        if use_mayavi:
            ds = 1  # downsampling factor
            i = 17
            pts_src_grid = pts_src.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_src_ds = pts_src_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_fwd_grid = pts_fwd.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_fwd_ds = pts_fwd_grid[::ds, ::ds, i].reshape(-1, 3)
            pts_inv_grid = pts_inv.cpu.reshape(tw.nRows, tw.nCols, -1, 3)
            pts_inv_ds = pts_inv_grid[::ds, ::ds, i].reshape(-1, 3)

            from of.my_mayavi import *
            mayavi_mlab_close_all()
            mayavi_mlab_figure_bgwhite('src')
            x, y, z = pts_src_ds.T
            mayavi_mlab_plot3d(x, y, z)
            mayavi_mlab_figure_bgwhite('fwd')
            x, y, z = pts_fwd_ds.T
            mayavi_mlab_plot3d(x, y, z)

        figsize = (12, 12)
        plt.figure(figsize=figsize)
        i = 17  # some slice
        plt.subplot(131)
        plt.imshow(img0.cpu[:, :, i].astype(np.uint8), interpolation="Nearest")
        plt.title('slice from img')
        plt.subplot(132)
        plt.imshow(img_wrapped_fwd.cpu[:, :, i].astype(np.uint8),
                   interpolation="Nearest")
        plt.axis('off')
        plt.title('slice from fwd(img)')
        plt.subplot(133)
        plt.imshow(img_wrapped_inv.cpu[:, :, i].astype(np.uint8),
                   interpolation="Nearest")
        plt.axis('off')
        plt.title('slice from inv(img)')

    if 0:  # debug

        cpa_space = tw.ms.L_cpa_space[level]
        if eval_v:
            vx = tw.v_dense.cpu[:, 0].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vy = tw.v_dense.cpu[:, 1].reshape(
                cpa_space.x_dense_grid_img.shape[1:])
            vz = tw.v_dense.cpu[:, 2].reshape(
                cpa_space.x_dense_grid_img.shape[1:])

            plt.figure()
            plt.imshow(vz[:, :, 17], interpolation="Nearest")
            plt.colorbar()
            plt.title('vz in some slice')

    return tw
Example #29
0
def plot_const(u, name, direc):

   if not os.path.isdir( direc ): 
      os.makedirs(direc)
 
   full_path = os.path.join(direc, name)
   full_mesh = os.path.join(direc, 'mesh.svg')
   
   mesh = u.function_space().mesh() 
   v    = u.compute_vertex_values(mesh)
   x    = mesh.coordinates()[:,0]
   y    = mesh.coordinates()[:,1]
   t    = mesh.cells() 
   
   vmin = v.min()
   vmax = v.max() 
   
   v[v < vmin] = vmin + 1e-12
   v[v > vmax] = vmax - 1e-12
   numLvls=100
   from matplotlib.ticker import ScalarFormatter
   levels    = np.linspace(vmin, vmax, numLvls)
   
   tick_numLvls = min( numLvls, 8 )
   tick_levels = np.linspace(vmin, vmax, tick_numLvls)
    
   formatter = ScalarFormatter()
   norm      = None
   
   n = mesh.num_vertices()
   d = mesh.geometry().dim()

   # Create the triangulation
   mesh_coordinates = mesh.coordinates().reshape((n, d))
   triangles = np.asarray([cell.entities(0) for cell in cells(mesh)])
   triangulation = tri.Triangulation(mesh_coordinates[:, 0],
                                  mesh_coordinates[:, 1],
                                  triangles)

   # Plot the mesh
#   plt.figure()
#   plt.triplot(triangulation)
#   plt.savefig( full_mesh ) 

   
   
   # Plot of scalar field
   V = u.function_space() #FunctionSpace(mesh, 'CG', 2)
   #f_exp = Expression('sin(2*pi*(x[0]*x[0]+x[1]*x[1]))')
   #f = interpolate(f_exp, V)

   fig = plt.figure(figsize=(5,5))
   ax  = fig.add_subplot(111)

   # Get the z values for each vertex
   #   cmap = plt.cm.jet 
   
   cmap = cmaps.parula 
   c = ax.tricontourf(x, y, t, v, levels=levels, norm=norm,
                     cmap=plt.get_cmap(cmap))
   
   plt.ioff()
   #fig = plt.figure()
   z = np.asarray([u(point) for point in mesh_coordinates])
   plt.tripcolor(triangulation, z, cmap=cmap)  # alt plt.tricontourf(...)
   plt.colorbar()
#   plt.savefig( full_path, bbox_inches='tight' )
   plt.savefig( full_path + '.eps', dpi=300)
   plt.close( fig )
Example #30
0
     sub = df.query('hour==@hour')
     _, p = scipy.stats.ks_2samp(
         sub.query('cond==1')['fraction'],
         sub.query('cond==2')['fraction'])
     pvals_per_hour.append(p)
 np.savetxt(
     f'{plotdir}/check_conditional_dist_samenoise_KSpval{params}_{epoch:04d}_{isample:04d}.txt',
     pvals_per_hour)
 for showfliers in (True, False):
     fig = plt.figure(constrained_layout=True, figsize=(6, 4.8))
     gs = fig.add_gridspec(2, 2)
     ax1 = fig.add_subplot(gs[0, 0])
     im = ax1.imshow(cond1.squeeze(), cmap=cmap, norm=plotnorm)
     plt.title('cond 1')
     plt.axis('off')
     plt.colorbar(im)
     ax2 = fig.add_subplot(gs[0, 1])
     im = ax2.imshow(cond2.squeeze(), cmap=cmap, norm=plotnorm)
     plt.title('cond 2')
     plt.axis('off')
     plt.colorbar(im)
     ax3 = fig.add_subplot(gs[1, :])
     sns.boxplot('hour',
                 'fraction',
                 hue='cond',
                 data=df,
                 ax=ax3,
                 showfliers=showfliers)
     sns.despine()
     plt.savefig(
         f'{plotdir}/check_conditional_dist_samenoise_showfliers{showfliers}_{params}_{epoch:04d}_{isample:04d}.svg')
Example #31
0
sites = [ii[0] for ii in tp]
disp0 = np.asarray([ii[1] for ii in tp]).flatten()
us0 = disp0[2::3]

ep = vj.EpochalDisplacement('cumu_post_with_seafloor.h5', filter_sites=sites)
disp1 = ep[0]
us1 = disp1[2::3]

plt.subplot(121)
bm = vj.MyBasemap(region_code='near')
mplt = vj.MapPlotDisplacement(basemap=bm)
mplt.plot_scalor(us0, sites, cmap='RdBu')
mplt = vj.MapPlotSlab(basemap=bm)
mplt.plot_top()
plt.clim([-1., 1.])

plt.subplot(122)
bm = vj.MyBasemap(region_code='near')
mplt = vj.MapPlotDisplacement(basemap=bm)
im = mplt.plot_scalor(us1, sites, cmap='RdBu')
mplt = vj.MapPlotSlab(basemap=bm)
mplt.plot_top()
plt.clim([-1., 1.])

divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)

plt.savefig('compare.pdf')
plt.show()
Example #32
0
File: pca.py  Project: MathiasGruber/plmd
def runAnalysis( caseDirs , resultsDir , noReweight = False):
    
    # Do a reference for each one
    for refDir in caseDirs:

        # ID of reference case
        refID = refDir.split("/")[-1]
        
        # User info
        print "Doing PCA analysis with "+refDir+" as reference"
        
        # Get the PCA limits of component 1-2 plot
        limit = 10
        with open(refDir+"/analysis/data/pca_limits_1", "r") as fi:
            limit = int(float(fi.read()))
            limit += 0.01
        
        # Go through the case dirs to plot
        for caseDir in caseDirs:
            
            print "Using "+caseDir+" as case"
            
            # ID of case
            caseID = caseDir.split("/")[-1]
            
            ## PCA PLOTTING ON REF DIR PCA COMPONENTS
            #########################################
            
            # Create & run cpptraj for plotting all cases on the axes of the first eigenvector
            # Good URLs for PCA in CPPTRAJ:
            # http://archive.ambermd.org/201404/0243.html
                        
            # PCA plotter
            pcaHandler = pcaFuncs.PCA( 
                resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+".pdf"
            )    
            
            # Create new submission file
            TEMPLATE = open( caseDir+"/ccptraj_analysis_pca.ptraj", 'r')
            TEMP = TEMPLATE.read().replace("[PCAREFERENCE]", refDir  )
            TEMPLATE.close()
                                  
            # Write the submission file
            FILE = open(caseDir+"/ccptraj_analysis_pca.ptraj","w")
            FILE.write( TEMP )
            FILE.close()
            
            # Run the cpptraj utility
            os.system( "$AMBERHOME/bin/cpptraj -p "+caseDir+"/md-files/peptide_nowat.prmtop -i "+caseDir+"/ccptraj_analysis_pca.ptraj" )
        
            # Do the plots of energy landscapes & distributions
            pcaHandler.plotPCA( 
                "Case: "+caseID+". Ref case: "+refID,   # Plot Title
                caseDir+"/analysis/data/" ,        # Data Dir
                "global_pca",                      # Eigenvector file
                eigenVectorCount = 2,              # Only plot two
                plotDistibution = False,           # Do not plot the distribution
                limits = limit
            )
            
            # Save the plot
            pcaHandler.savePlot()
            
            ## REWEIGHTING OF PCA PLOTS ON REF DIR PCA COMPONENTS
            #####################################################

            # Check if we should do a reweighted version
            if noReweight == False:
                if os.path.isfile( caseDir+"/md-logs/weights.dat" ):
                    
                    # User info
                    print "aMD weights found. Now attempting 2D reweighting"   
                    
                    # Prepare input file
                    numLines = 0
                    with open(caseDir+"/analysis/data/global_pca", "r") as fi:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn", "w") as fo:
                            next(fi)
                            for line in fi:
                                numLines += 1
                                fo.write( line.split()[1]+"\t"+line.split()[2]+"\n" )

                    # Set the discretization
                    reqBins = 100         
                    discretization = (2*limit) / reqBins    
                    
                    # Get the max value of normal plot
                    maxValue = math.ceil(pcaHandler.getLatestMax())
                    
                    # Run the reweighting procedure
                    command = "python $PLMDHOME/src/PyReweighting/PyReweighting-2D.py \
                                -input "+caseDir+"/analysis/data/global_pca_singleColumn \
                                -name "+caseDir+"/analysis/data/global_pca_singleColumn_reweighted \
                                -Xdim -"+str(limit)+" "+str(limit)+" \
                                -Ydim -"+str(limit)+" "+str(limit)+" \
                                -discX "+str(discretization)+" \
                                -discY "+str(discretization)+" \
                                -cutoff 10 \
                                -Emax "+str(maxValue)+" \
                                -job amdweight_CE \
                                -weight "+refDir+"/md-logs/weights.dat | tee -a reweight_variable.log"
                    print "Running command:", command
                    os.system( command )
                    
                    # Create a long-format file for the PCA module: repeat each
                    # 2D bin int(float(value)*10) times so the reweighted data
                    # can be read as per-frame samples
                    with open(caseDir+"/analysis/data/global_pca_reweightedDone", "w") as fo:
                        with open(caseDir+"/analysis/data/global_pca_singleColumn_reweighted-pmf-c2.dat", "r") as fi:
                            frame = 0
                            for line in fi:
                                temp = line.split()
                                entries = int(float(temp[2])*10)
                                for i in range(0,entries):
                                    fo.write( str(frame) + "\t" + temp[0] + "\t" + temp[1] +"\n" )
                                    frame += 1

                    # Set up the figure and fonts for the reweighted 2D histogram
                    fig, ax = plt.subplots(figsize=(8, 8), nrows=1, ncols=1 )
                    font = {'weight' : 'normal','size' : 10}
                    plt.rc('font', **font)
                    
                    # Now plot the 2d histogram
                    hist = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2EnergyHist.npy")   
                    xedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesX.npy")   
                    yedges = np.load(caseDir+"/analysis/data/global_pca_singleColumn_reweighted_c2edgesY.npy")   
                    
                    # Mask bins at or above the energy cutoff (maxValue)
                    for jy in range(len(hist[0,:])):
                        for jx in range(len(hist[:,0])):
                            if hist[jx,jy] >= maxValue:
                                hist[jx,jy] = float("inf")
                    
                    # Do plot
                    img = plt.imshow(hist.transpose(),  interpolation='nearest', origin='lower',extent=[yedges[0], yedges[-1],xedges[0], xedges[-1]] , rasterized=True )
                    
                    # create an axes on the right side of ax. The width of cax will be 5%
                    # of ax and the padding between cax and ax will be fixed at 0.05 inch.
                    divider = make_axes_locatable(ax)
                    cax = divider.append_axes("right", size="5%", pad=0.05)   
                    
                    # Create colorbar
                    colorbar = plt.colorbar(img, ax=ax, cax = cax)
                    colorbar.set_label("Kcal / mol")
                    
                    # Set title, labels etc
                    plt.legend()
                    ax.set_xlabel("PC1", fontsize=12)
                    ax.set_ylabel("PC2", fontsize=12)
                    
                    ax.set_title( "PCA. Case: "+caseID+" Reweighted. Ref case: "+refID )
                    plt.rc('font', **font) 
                    
                    # Save figure
                    fig.savefig(resultsDir+"/plots/pcaComparison/PCA_"+caseID+"_on_"+refID+"_reweighted.pdf")
                    

            ## CLUSTER PLOTS ON PCA COMPONENTS
            ##################################

            # Do both hier and dbscan
            for clusterType in ["dbscan","hier"]:            
                
                # Instantiate the class
                if os.path.isfile(caseDir+"/analysis/data/cluster_"+clusterType+"_out"):   
                    
                    print "Doing the "+clusterType+" cluster equivalent of the PCA plot"
                
                    # Start the cluster handler. Load the file declaring cluster for each frame
                    clusterHandler = cluster.clusterBase( caseDir+"/analysis/data/cluster_"+clusterType+"_out" )
                    
                    # Separate the dataset.
                    # global_pca is the projection file for this case on the ref modes
                    numPCAdataSets = clusterHandler.separateDataSet( 
                        caseDir+"/analysis/data/global_pca",            # Input file
                        caseDir+"/analysis/data/cluster_"+clusterType+"_pca_",   # Output files
                        xColumn = 1
                    ) 
                    
                    # Create lists of labels and files for plotting
                    clusterLabels = []
                    clusterFiles = []
                    offset = 1 if clusterType == "hier" else 0
                    for i in range( 0+offset, numPCAdataSets+offset):
                        clusterLabels.append( "Cluster "+str(i) )
                        clusterFiles.append( caseDir+"/analysis/data/cluster_"+clusterType+"_pca_d2_c"+str(i) )
                    
                    # First one is noise
                    if offset == 0:
                        clusterLabels[0] = "Noise"                 
                    
                    myPlot.plotData( 
                        resultsDir+"/plots/pcaComparison/" , 
                        clusterType+"_"+caseID+"_on_"+refID, 
                        clusterLabels, 
                        clusterFiles , 
                        "PC2",
                        xUnit = "PC1",
                        scatter = True,
                        legendLoc = 4,
                        figWidth = 8,
                        figHeight = 8,
                        tightXlimits = False,
                        legendFrame = 1,
                        legendAlpha = 1,
                        xLimits = [-limit,limit],
                        yLimits = [-limit,limit]
                    )
Example #33
0
 def plot_convictions(self):
     # plt.figure()
     plt.matshow(self.get_matrix_of_agents_convictions())
     plt.colorbar()
     plt.clim(-1, 1)  # Sets the min/max limits of colorbar
     plt.title("Convictions")
Example #34
0
w = np.random.random((1000, len(symbols)))

w = (w.T / w.sum(axis=1)).T

w[:5]

pvr = [(port_volatility(rets[symbols],
                        weights), port_return(rets[symbols], weights))
       for weights in w]
pvr = np.array(pvr)

psr = pvr[:, 1] / pvr[:, 0]
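
# The helpers port_volatility() and port_return() are not defined in this
# snippet. A minimal sketch of what they are assumed to compute (annualised
# over 252 trading days), by analogy with the port_vol/port_ret helpers used
# in a later snippet:
#
#   def port_return(rets, weights):
#       return np.sum(rets.mean() * weights) * 252
#
#   def port_volatility(rets, weights):
#       return np.sqrt(np.dot(weights.T, np.dot(rets.cov() * 252, weights)))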

plt.figure(figsize=(10, 6))
fig = plt.scatter(pvr[:, 0], pvr[:, 1], c=psr, cmap='coolwarm')
cb = plt.colorbar(fig)
cb.set_label('Sharpe ratio')
plt.xlabel('expected volatility')
plt.ylabel('expected return')
plt.title(' | '.join(symbols))

# Import libraries
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt

# Creating dataset
z = pvr[:, 0]
x = pvr[:, 1]
y = psr
Example #35
0
def plot_variable(u,
                  name,
                  direc,
                  cmap='gist_yarg',
                  scale='lin',
                  numLvls=12,
                  umin=None,
                  umax=None,
                  tp=False,
                  tpAlpha=0.5,
                  show=True,
                  hide_ax_tick_labels=False,
                  label_axes=True,
                  title='',
                  use_colorbar=True,
                  hide_axis=False,
                  colorbar_loc='right'):
    """
  """
    mesh = u.function_space().mesh()
    v = u.compute_vertex_values(mesh)
    x = mesh.coordinates()[:, 0]
    y = mesh.coordinates()[:, 1]
    t = mesh.cells()

    d = os.path.dirname(direc)
    if not os.path.exists(d):
        os.makedirs(d)

    if umin != None:
        vmin = umin
    else:
        vmin = v.min()
    if umax != None:
        vmax = umax
    else:
        vmax = v.max()

    # contour levels :
    if scale == 'log':
        v[v < vmin] = vmin + 1e-12
        v[v > vmax] = vmax - 1e-12
        from matplotlib.ticker import LogFormatter
        levels = np.logspace(np.log10(vmin), np.log10(vmax), numLvls)
        formatter = LogFormatter(10, labelOnlyBase=False)
        norm = colors.LogNorm()

    elif scale == 'lin':
        v[v < vmin] = vmin + 1e-12
        v[v > vmax] = vmax - 1e-12
        from matplotlib.ticker import ScalarFormatter
        levels = np.linspace(vmin, vmax, numLvls)
        formatter = ScalarFormatter()
        norm = None

    elif scale == 'bool':
        from matplotlib.ticker import ScalarFormatter
        levels = [0, 1, 2]
        formatter = ScalarFormatter()
        norm = None

    fig = plt.figure(figsize=(8, 7))
    ax = fig.add_subplot(111)

    c = ax.tricontourf(x,
                       y,
                       t,
                       v,
                       levels=levels,
                       norm=norm,
                       cmap=pl.get_cmap(cmap))
    plt.axis('equal')

    if tp == True:
        p = ax.triplot(x, y, t, 'k-', lw=0.25, alpha=tpAlpha)
    ax.set_xlim([x.min(), x.max()])
    ax.set_ylim([y.min(), y.max()])
    if label_axes:
        ax.set_xlabel(r'$x$')
        ax.set_ylabel(r'$y$')
    if hide_ax_tick_labels:
        ax.set_xticklabels([])
        ax.set_yticklabels([])
    if hide_axis:
        plt.axis('off')

    # include colorbar :
    if scale != 'bool' and use_colorbar:
        divider = make_axes_locatable(plt.gca())
        cax = divider.append_axes(colorbar_loc, "5%", pad="3%")
        cbar = plt.colorbar(c, cax=cax, format=formatter, ticks=levels)
        pl.mpl.rcParams['axes.titlesize'] = 'small'
        tit = plt.title(title)

    plt.tight_layout()
    d = os.path.dirname(direc)
    if not os.path.exists(d):
        os.makedirs(d)
    plt.savefig(direc + name + '.pdf')
    if show:
        plt.show()
    plt.close(fig)
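
# A hedged usage sketch (assumes a FEniCS/dolfin environment; the mesh, space,
# and expression below are illustrative, not part of the original snippet):
#
#   from dolfin import UnitSquareMesh, FunctionSpace, Expression, interpolate
#   mesh = UnitSquareMesh(32, 32)
#   V = FunctionSpace(mesh, 'CG', 1)
#   f = interpolate(Expression('sin(2*pi*x[0])*sin(2*pi*x[1])', degree=2), V)
#   plot_variable(f, 'f_sin', './plots/', cmap='viridis', numLvls=20, show=False)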
Example #36
0
    plt.imshow(cell_idx.cpu.reshape(grid_shape))
    
    plt.subplot(231)
    scale=[2*30,1.5*4][vol_preserve]

    cpa_space.quiver(pts_grid,v_dense,scale, ds=16/2)
    config_plt()

    plt.subplot(232)
    plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
               vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
#    cpa_space.plot_cells()               
    config_plt()
    plt.subplot(233)
    plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
               vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
#    cpa_space.plot_cells()
    config_plt()

    plt.subplot(235)
    plt.imshow(v_dense.cpu[:,0].reshape(grid_shape),interpolation='Nearest',
               vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
    cpa_space.plot_cells(color='k')               
    config_plt()
    plt.subplot(236)
    plt.imshow(v_dense.cpu[:,1].reshape(grid_shape),interpolation='Nearest',
               vmin=v_dense.cpu[:,:].min(),vmax=v_dense.cpu[:,:].max());plt.colorbar()
    cpa_space.plot_cells(color='k')
    config_plt()

# In[46]:


def port_vol(weights):
    return np.sqrt(np.dot(weights.T, np.dot(rets.cov() * 252, weights)))
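
# port_ret() is used below but not defined in this fragment; a minimal sketch
# of what it is assumed to compute (annualised mean return, 252 trading days),
# by analogy with port_vol above:
#
#   def port_ret(weights):
#       return np.sum(rets.mean() * weights) * 252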


# In[47]:

prets = []
pvols = []
for p in range(2500):
    weights = np.random.random(noa)
    weights /= np.sum(weights)
    prets.append(port_ret(weights))
    pvols.append(port_vol(weights))
prets = np.array(prets)
pvols = np.array(pvols)

# In[48]:

plt.figure(figsize=(10, 6))
plt.scatter(pvols, prets, c=prets / pvols, marker='o', cmap='coolwarm')
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
plt.title(
    'Expected Return and Volatility for Random Portfolio Weights (SPY, AAPL, MSFT, GLD)'
)
plt.colorbar(label='Sharpe Ratio')
    stdx = 9
    stdy = 3

    sint = positionToIntensityUncertainty(img, stdx, stdy)

    # CASE2: variable position uncertainty:
    # x,y 0...15
    stdx2 = np.fromfunction(lambda x, y: x * y, img.shape)
    stdx2 /= stdx2[-1, -1] / 9
    stdy2 = np.fromfunction(lambda x, y: x * y, img.shape)
    stdy2 /= stdy2[-1, -1] / 9
    stdy2 = stdy2[::-1, ::-1]  # flip content twice

    sint2 = positionToIntensityUncertainty(img, stdx2, stdy2, 21)

    if 'no_window' not in sys.argv:
        plt.figure('input')
        plt.imshow(img)
        plt.colorbar()

        plt.figure('output for const. position uncertainty (x%s,y%s)' %
                   (stdx, stdy))
        plt.imshow(sint)
        plt.colorbar()

        plt.figure('output for var. position uncertainty 0...15')
        plt.imshow(sint2)
        plt.colorbar()

        plt.show()