Example No. 1
def Vplot(Ws):
	"""Calculate the potential function and plot it"""

	N_bstrp = int(raw_input("Please enter the number of bootstraps: "))
	N_bin = int(raw_input("Please enter the bin size: "))
	style = raw_input("Please enter a linestyle: ")

	Ws = bin(Ws,N_bin)
	aVs = pl.zeros((N_bstrp,) + pl.shape(Ws)[1:])
	bs = pl.zeros((N_bstrp,3))
        
	for i in xrange(N_bstrp):
		W = pl.mean(bootstrap(Ws),axis=0)
		aVs[i] = calcaV(W,method="fit")
		bs[i] = potfit(aVs[i,:,0])

			
	r = pl.arange(1,7)
	aV = pl.mean(aVs,axis=0)
	aVerr = pl.std(aVs,axis=0)
	b = pl.mean(bs,axis=0)

	# set the physical scale via the Sommer parameter, r0 = 0.5 fm
	a_s = 0.5 / pl.sqrt((1.65 + bs[:,1]) / bs[:,0])
	sigmas = bs[:,0] / a_s**2
	Bs = bs[:,1]
	As = bs[:,2] / a_s

	a = pl.mean(a_s)
	aerr = pl.std(a_s)
	sigma = pl.mean(sigmas)
	sigmaerr = pl.std(sigmas)
	B = pl.mean(Bs)
	Berr = pl.std(Bs)
	A = pl.mean(As)
	Aerr = pl.std(As)

	print("Fit parameters:")
	print("sigma = %f +/- %f fm^-2 = %f +/- %f MeV^2"
		% (sigma, sigmaerr, sigma * 197**2, sigmaerr * 197**2))
	print("B = %f +/- %f" % (B, Berr))
	print("A = %f +/- %f fm^-1 = %f +/- %f MeV"
		% (A, Aerr, A*197, Aerr*197))
	print("Lattice spacing, a = %f +/- %f fm = %f +/- %f MeV^-1"
		% (a, aerr, a/197, aerr/197))
	
	r_fit = pl.arange(0.25,r[-1]+1,0.1)
	aV_fit = V(b,r_fit)
	
	handles = []
	handles.append(pl.errorbar(r,aV[:,0],yerr=aVerr[:,0],fmt='o'+style[0]))
	handles.append(pl.plot(r_fit,aV_fit,style))
	pl.ylim([0,pl.nanmax(aV)+0.25])
	pl.xlim([0,pl.nanmax(r_fit)+0.25])
	pl.xlabel("$r / a$")
	pl.ylabel("$aV(r)$")

	return aV,handles
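# Vplot relies on helper functions (bin, bootstrap, calcaV, potfit, V) that
# are not part of this excerpt. As illustration only, minimal sketches of the
# two generic ones, assuming Ws stacks the Wilson-loop measurements along
# axis 0 -- these are assumptions, not the original implementations:
import numpy as np

def bin(Ws, binsize):
    """Average consecutive groups of binsize measurements along axis 0."""
    n_bins = Ws.shape[0] // binsize
    return Ws[:n_bins * binsize].reshape((n_bins, binsize) + Ws.shape[1:]).mean(axis=1)

def bootstrap(Ws):
    """Resample the binned measurements along axis 0 with replacement."""
    idx = np.random.randint(0, Ws.shape[0], Ws.shape[0])
    return Ws[idx]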
Example No. 2
 def finddepth(self, showdepth = False):
     """find all the 7006 ranges that have both good colinearity and
     brightness, and bin them and use the bin with the maximum number as
     the depth for that beam."""
     print "Extracting depth information...",
     depth = pl.zeros((self.num7006, self.numbeams))
     depth[:,:] = pl.nan
     flagmask = pl.zeros(self.numbeams, dtype=int)
     flagmask[:] = 3
     for i in xrange(self.num7006):
         self.calfile.getrecord(7006, i)
         pingflags = pl.asarray(self.calfile.packet.subpack.data[1, :], dtype=int)
         indx = pl.find((flagmask & pingflags) == 3)
         depth[i, indx] = self.calfile.packet.subpack.data[0,indx]    
     print "estimating the depth...",
     depthmax = pl.nanmax(depth)
     self.depthbins = pl.arange(0, depthmax + self.windowlen, self.windowlen)
     self.depthestimate = pl.zeros(self.numbeams)
     for j in xrange(self.numbeams):
         h = pl.histogram(depth[:, j], bins = self.depthbins)
         self.depthestimate[j] = h[1][h[0].argmax()]
     print "depth estimation completed, binning beams."
     # sort beams with similar ranges into a list
     self.beamlisting = []
     for binstart in self.depthbins:
         indx = pl.find((self.depthestimate < binstart + self.windowlen)
             & (self.depthestimate >= binstart - self.windowlen))
         if len(indx) > 0:
             self.beamlisting.append(indx)
     if showdepth:
         fig = pl.figure()
         ax = fig.add_subplot(111)
         ax.plot(depth.T, 'x')
         ax.plot(self.depthestimate,'o')
         ax.set_xlabel('Beam Number')
         ax.set_ylabel('Time (seconds)')
         ax.set_title('Bottom Detections')
         pl.xlim((0,self.numbeams))
         ymin, ymax = pl.ylim()
         pl.ylim((ymax, ymin))
         ax.grid()
         pl.draw()
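# The depth-estimation step above in isolation: histogram one beam's ranges
# and take the left edge of the fullest bin as the depth estimate. Toy
# numbers, not survey data:
import numpy as np

ranges = np.array([10.1, 10.3, 10.2, 35.0, 10.25])  # one beam's good ranges
windowlen = 0.5
bins = np.arange(0, ranges.max() + windowlen, windowlen)
counts, edges = np.histogram(ranges, bins=bins)
print(edges[counts.argmax()])  # 10.0 -- the bin where detections cluster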
Example No. 3
def heat(df, group_func, title, strategy, cb_label, sequential=True, vmax=None):
    x = df['explore'].unique()
    y = df['decay'].unique()
    z = pylab.zeros((len(y), len(x)))
    grouped = df.groupby(['explore', 'decay'])
    pos = 0
    for name, group in grouped:
        val = group_func(group)
        if name == (MLE_EXPLORE, MLE_DECAY):
            pass #print title, val
        i, j = pos % len(y), pos / len(y)  # row = decay index, col = explore index
        z[i, j] = val
        pos += 1
    if pos != len(x) * len(y):
        print pos, len(x), len(y), len(x) * len(y)
    assert pos == len(x) * len(y)
    z = ma.masked_invalid(z)
    pylab.figure()
    cmap = 'Reds'
    vmin = 0
    if vmax is None:
        vmax = pylab.nanmax(pylab.absolute(z))
    if vmax == vmin:
        vmax = vmin + 0.01
    if not sequential:
        cmap = 'coolwarm'
        vmin = -vmax  # symmetric limits for the diverging colormap
    #print title, vmin, vmax, cmap
    hm = pylab.pcolormesh(z, cmap=cmap, vmin=vmin, vmax=vmax, clim=(vmin, vmax))
    curr_ax = pylab.gca()
    curr_ax.axis('tight')
    cb = pylab.colorbar(hm)
    cb.set_clim(vmin, vmax)
    cb.ax.set_ylabel(cb_label, fontsize=20)
    cb.ax.set_ylim(bottom=0)
    pylab.tick_params(which='both', bottom='off', top='off', left='off', right='off', \
    labeltop='off', labelbottom='off', labelleft='off', labelright='off')
    pylab.xlabel("explore probability (%0.2f - %0.2f)" % (min(x), max(x)), fontsize=20)
    pylab.ylabel("pheromone decay (%0.2f-%0.2f)" % (min(y), max(y)), fontsize=20)
    pylab.savefig("%s_%s.png" % (title, strategy), format="png", transparent=TRANSPARENT, bbox_inches='tight')
    os.system('convert %s_%s.png %s_%s.pdf' % (title, strategy, title, strategy))
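# The i, j bookkeeping in heat() relies on DataFrame.groupby iterating group
# keys in sorted order, so pos walks the decay values fastest and fills z
# column by column. A quick check of that mapping with made-up values:
import pandas as pd

df = pd.DataFrame({'explore': [0.1, 0.1, 0.2, 0.2],
                   'decay':   [0.5, 0.9, 0.5, 0.9]})
for pos, (name, _) in enumerate(df.groupby(['explore', 'decay'])):
    print(name, (pos % 2, pos // 2))  # (row = decay index, col = explore index)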
Example No. 4
zelong = zelong[keep]
n_elong_dendro = len(zelong)

# this will be for the list of fil,branch pairs that overlap this dendro, and by how many pixels:
overlapping_fil_branches = [{
    "dendro_id": 0,
    "filbranches": [],
    "npixoverlap": []
} for i in range(n_elong_dendro)]
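# For reference, filling in one record might look like this (the ids and
# pixel count below are made up for illustration):
#   rec = overlapping_fil_branches[0]
#   rec["dendro_id"] = 42              # id of this elongated dendro
#   rec["filbranches"].append((3, 1))  # (filament, branch) pair overlapping it
#   rec["npixoverlap"].append(17)      # size of that overlap in pixels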

#================================================================
mom8 = fits.getdata(mom8file)
pl.clf()
pl.imshow((pl.nanmax(mom8) - mom8), origin="lower", cmap="gray")  # inverted grayscale: bright emission plots dark

pl.subplots_adjust(left=0.05, right=0.98, bottom=0.05, top=0.98)
pl.xlim(xra)
pl.ylim(yra)
pl.xticks([])
pl.yticks([])
pl.contour(fils.skeleton,
           colors='c',
           linewidths=1,
           vmin=0,
           vmax=1,
           levels=[0.5])

for ff in fils.filaments:
    for b in ff.end_pts:
Example No. 5
def run_filfinder(label='mycloud',
                  mom8file=None,
                  mom0file=None,
                  redo=False,
                  distpc=4.8e4,
                  xra=[0, 2000],
                  yra=[0, 2000],
                  glob_thresh=0.1,
                  adapt_beams=14):

    plotfiledir = "fil_plots"  # no trailing /
    # filpropfile = label+".filprops.pkl" # phys props

    fits_hdu = fits.open(mom8file)[0]
    # setting beamwidth will have no effect if a fits header is given
    fils = FilFinder2D(fits_hdu, distance=distpc * u.pc)

    fils.save_name = plotfiledir + "/" + label
    if not os.path.exists(plotfiledir):
        os.mkdir(plotfiledir)
        redo = True

    fils.preprocess_image(flatten_percent=90)

    # GMC1 adapt=50pix, size=300pix2, smooth=8pix    ppbm=1.76"/0.5=3.53
    # so in bms, that's adapt=14, size=24, smooth=2.3
    # for gmc1, using adapt=10, size=15 gives 23 fils instead of 8 and too much chaff,
    # size=24, adapt=14, smooth=2.0: 9 fils instead of 8, more branches, and more vel perpendicular to fils.
    # size=24, adapt=14, smooth=3: 11fils, a bit fewer small fils.

    # 30dor: glob=72, flatten=100, adapt=75pix, size=800pix2  ppbm=0.29/0.032=9.1
    # so in bms, adapt=8.2, size=9.7 but bm elongated so narrow dir adapt=10

    # one can lower glob_thresh to get more small things, then lower adapt_thresh
    # to make them break up more, and finally let size_thresh kill them.
    # in new FilFinder, glob_thresh is no longer a percentage, it's a value;
    # setting it to 50% of the image max is too high; 20% is closer, 10% quite good

    bmwidth = pl.sqrt(
        fits_hdu.header['bmaj'] * fits_hdu.header['bmin']) * 3600 * u.arcsec
    # 30dor tuned parameters
    #fils.create_mask(verbose=True, border_masking=False,
    #                 use_existing_mask=False,save_png=True,
    #                 adapt_thresh=14*bmwidth, size_thresh=24*bmwidth**2,
    #                 smooth_size=3*bmwidth, glob_thresh=pl.nanmax(fits_hdu.data)*glob_thresh)
    fils.create_mask(verbose=True,
                     border_masking=False,
                     use_existing_mask=False,
                     save_png=True,
                     adapt_thresh=adapt_beams * bmwidth,
                     size_thresh=24 * bmwidth**2,
                     smooth_size=3 * bmwidth,
                     glob_thresh=pl.nanmax(fits_hdu.data) * glob_thresh)
    # GMC104: decreasing adapt_thresh makes more filaments but also more arcs
    #fils.create_mask(verbose=True, border_masking=False,
    #                 use_existing_mask=False,save_png=True,
    #                 adapt_thresh=5*bmwidth, size_thresh=24*bmwidth**2,
    #                 smooth_size=3*bmwidth, glob_thresh=pl.nanmax(fits_hdu.data)*glob_thresh)

    fils.medskel(verbose=True, save_png=True)

    print("calling analyze_skeletons")
    # 30dor:
    # fils.branch_thresh=200 # typical lengths are 1-100
    # raise from 10 to 30 to eliminate more little things
    # fils.analyze_skeletons(verbose=True, save_png=True, skel_thresh=10,cubefile=cubefile)

    # mc3: old version was doing double filter instead, with branch_thresh=30;
    # branch_thresh says it overrides all previous settings
    fils.analyze_skeletons(verbose=True,
                           save_png=True,
                           prune_criteria="length",
                           branch_nbeam_lengths=5,
                           nbeam_lengths=5,
                           skel_thresh=10 * u.pix)
    #relintens_thresh=0.1, branch_thresh=30,

    # the labeled filament arrays are left separated even though the branches
    # have been pruned, so we need to recalculate the labelled_filament_arrays
    # pix_identify is supposed to do that;
    # fils.filament_arrays comes from make_final_skeletons

    if redo:
        pl.clf()
        vmin = np.percentile(fils.flat_img[np.isfinite(fils.flat_img)], 20)
        vmax = np.percentile(fils.flat_img[np.isfinite(fils.flat_img)], 99)
        pl.imshow(fils.flat_img.value,
                  interpolation='none',
                  origin="lower",
                  vmin=vmin,
                  vmax=vmax * 1.5,
                  cmap='jet')
        pl.contour(fils.skeleton, colors='r')
        offs = fils.array_offsets
        for i in range(len(offs)):
            pl.text(0.5 * (offs[i][0][1] + offs[i][1][1]),
                    0.5 * (offs[i][0][0] + offs[i][1][0]),
                    str(i),
                    color='orange')
        pl.xlim(xra)
        pl.ylim(yra)
        pl.savefig(plotfiledir + "/" + plotfiledir + ".skel.png")

        # find_widths calculates the 2d distance transform, and uses self.image, self.imgscale
        # first widths on mom0
        fils.set_image(fits.getdata(mom0file))

        fils.save_name = plotfiledir + ".mom0wid"
        fils.find_widths(verbose=True, save_png=True, max_dist=2. * u.pc)
        mom0width = fils.width_fits().copy()

        pl.clf()
        vmin = np.percentile(fils.image[np.isfinite(fils.image)], 20)
        vmax = np.percentile(fils.image[np.isfinite(fils.image)], 99)
        pl.imshow(fils.image.value,
                  interpolation='none',
                  origin="lower",
                  vmin=vmin,
                  vmax=vmax * 1.5,
                  cmap='jet')
        pl.contour(fils.skeleton, colors='r')
        offs = fils.array_offsets
        for i in range(len(offs)):
            pl.text(0.5 * (offs[i][0][1] + offs[i][1][1]),
                    0.5 * (offs[i][0][0] + offs[i][1][0]),
                    str(i),
                    color='orange')
        pl.xlim(xra)
        pl.ylim(yra)
        pl.savefig(plotfiledir + "/" + plotfiledir + ".int.skel.png")

        # now widths and orientations on the mom8 directly:
        fils.set_image(fits.getdata(mom8file))

        fils.save_name = plotfiledir + ".mom8wid"
        fils.find_widths(verbose=True, save_png=True,
                         max_dist=2. * u.pc)  # CHANGE FOR 30DOR
        mom8width = fils.width_fits().copy()

        pl.clf()
        x = fils.converter.from_pixel(mom8width['fwhm'], u.pc).value
        dx = fils.converter.from_pixel(mom8width['fwhm_err'], u.pc).value
        y = fils.converter.from_pixel(mom0width['fwhm'], u.pc).value
        dy = fils.converter.from_pixel(mom0width['fwhm_err'], u.pc).value
        pl.errorbar(x, y, xerr=dx, yerr=dy, fmt='.')
        xmax = pl.nanmax([x, y])
        pl.xlim(0, xmax * 1.1)
        pl.ylim(0, xmax * 1.1)
        pl.xlabel("FWHM fit to mom8 [pc]")
        pl.ylabel("FWHM fit to mom0 [pc]")
        pl.plot([0, xmax], [0, xmax], ':k')
        pl.savefig(plotfiledir + "/" + plotfiledir + ".widths.mom8.mom0.png")

    return fils
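# Hypothetical invocation -- the FITS file names are placeholders, not files
# that ship with this code:
fils = run_filfinder(label='gmc1',
                     mom8file='gmc1.mom8.fits',
                     mom0file='gmc1.mom0.fits',
                     distpc=4.8e4,
                     glob_thresh=0.1)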
Example No. 6
def greedy_eval_stats(model='uniform'):
    df = pd.read_csv('greedy_eval_%s.csv' % model, skipinitialspace=True)
    df['success'] = df['cost ratio'] == 0
    df['error'] = df['cost ratio'] < 0

    mcost_ratios = []
    scost_ratios = []
    cost_ratios = []
    time_ratios = []
    algorithm_labels = {'brute force' : 'Brute force', 'steiner' : 'Greedy', 'prim' : 'Karger', 'khuller' : 'Khuller'}
    labels = []

    for algorithm, group in df.groupby('algorithm'):
        print algorithm
        alpha = group['alpha']
        print len(alpha) / len(alpha.unique()), "point sets", len(alpha), "trials"
        print "success rate", pylab.mean(group['success'])
        print "error rate", pylab.mean(group['error'])
        print "dominated", pylab.mean(group['dominated'])
        
        mcost_ratio = group['mcost ratio']
        scost_ratio = group['scost ratio']
        cost_ratio = group['cost ratio']
        time_ratio = group['time ratio']

        print "mcost ratio", pylab.nanmean(mcost_ratio), "+/-", pylab.nanstd(mcost_ratio, ddof=1)
        print "scost ratio", pylab.nanmean(scost_ratio), "+/-", pylab.nanstd(scost_ratio, ddof=1)
        print "cost ratio", pylab.nanmean(cost_ratio), "+/-", pylab.nanstd(cost_ratio, ddof=1)
        print "time ratio", pylab.nanmean(time_ratio), "+/-", pylab.nanstd(time_ratio, ddof=1)
        
        print "max mcost ratio", pylab.nanmax(mcost_ratio)
        print "max scost ratio", pylab.nanmax(scost_ratio)
        print "max cost ratio", pylab.nanmax(cost_ratio)
        print "max time ratio", pylab.nanmax(time_ratio)

        mcost_ratios.append(mcost_ratio)
        scost_ratios.append(scost_ratio)
        cost_ratios.append(cost_ratio)
        time_ratios.append(time_ratio)
        labels.append(algorithm_labels[algorithm])

        for points, points_group in group.groupby('points'):
            print points, pylab.mean(points_group['time'])

    sns.set()
    
    axis_text = {'mcost' : 'wiring cost', 'scost' : 'conduction delay', 'cost' : 'Pareto cost', 'time' : 'runtime'}
    for ratios, fname in zip([mcost_ratios, scost_ratios, cost_ratios, time_ratios], ['mcost', 'scost', 'cost', 'time']):
        pylab.figure()
        x = []
        weights = []
        for ratio, label in zip(ratios, labels):
            ratio = pylab.array(ratio)
            ratio = ratio[~pylab.isnan(ratio)]
            x.append(ratio)
            wt = pylab.ones_like(ratio) / float(len(ratio))
            weights.append(wt)
        pylab.hist(x, weights=weights, label=labels)
        pylab.legend()
        ax = pylab.gca()
        pylab.setp(ax.get_legend().get_texts(), fontsize=20) # for legend text
        ax_text = axis_text[fname]
        pylab.xlabel('%s (percent higher/lower than %s)' % (ax_text, BASELINE), size=20)
        pylab.ylabel('proportion', size=20)
        pylab.tight_layout()
        pylab.savefig('greedy_eval/%s_ratios_hist.pdf' % fname, format='pdf')
        pylab.close()
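# The per-point weights of 1/N passed to hist turn raw counts into
# proportions; the same trick in isolation:
import numpy as np
import matplotlib.pyplot as plt

data = np.random.randn(500)
w = np.ones_like(data) / float(len(data))
plt.hist(data, weights=w)  # bar heights now sum to 1 across all bins
plt.ylabel('proportion')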
Example No. 7
    def extract7028(self,  graph = False):
        """
        Extract 7028 data from a s7k file written by the sevenpy script.
        Returns a ping list for amplitude, power and gain settings, and 
        beam sections.
        """
        
        num7028 = len(self.calfile.map.packdir['7028'])
        r7000 = pl.array(self.calfile.map.packdir['7000'])
        self.pingdata = pl.zeros((num7028,3,len(self.beamlisting)))
        print 'Processing ' + str(num7028) + ' pings, at           ',

        if graph:
            pl.hold(False)
        j = 0
        for i in xrange(num7028):
            #get 7028 max amplitude
            self.calfile.getrecord('7028',i)
            if self.calfile.packet.subpack.snippets is not None:
                snip = self.calfile.packet.subpack.snippets.T
                maxbeam = self.calfile.packet.subpack.maxbeam
                n,m = snip.shape
                amp = pl.zeros((n,self.numbeams))
                amp[:,:] = pl.nan
                amp[:,:maxbeam] = snip
                # get the start and end samples for each beam
                desc = self.calfile.packet.subpack.descriptor.astype(pl.np.int)
                pingdepth = pl.zeros((self.numbeams,2))
                pingdepth[desc[:,0],:] = desc[:,[1,3]]/ self.samplerate
                # get the range of snippets that are in the depth window
                depthmask = ((self.depthestimate < pingdepth[:, 1]) & (self.depthestimate > pingdepth[:, 0]))
                # filter the snippets according to the mask
                amp *= depthmask
                ts7028 = self.calfile.packet.gettime()
                if graph:
                    self.calfile.packet.subpack.plot()
                    pl.draw()
                
                #get settings from corresponding 7000 packet
                indx7000 = pl.find(ts7028 == r7000[:,1])
                if len(indx7000) == 0:
                    num7028 -= 1
                elif len(indx7000) > 1:
                    print 'more than one 7000 record with matching time stamp!'
                else:
                    self.calfile.getrecord('7000', indx7000[0])
                    power = self.calfile.packet.subpack.header[14]
                    gain = self.calfile.packet.subpack.header[15]         
                    if power not in self.settings['power'] and power != 0:
                        self.settings['power'].append(power)
                    if gain not in self.settings['gain'] and gain != 0:
                        self.settings['gain'].append(gain)
                    for k, indx in enumerate(self.beamlisting):
                        self.pingdata[j, :, k] = pl.nanmax(amp[:, indx]), power, gain
                j += 1
            if i % 10 == 0:
                sys.stdout.write('\b\b\b\b\b\b\b\b\b\b%(percent)02d percent' %{'percent':100 * i / num7028})
        self.calfile.close()
        print '\n' + str(num7028) + ' records used.'
        self.settings['power'].sort()
        self.settings['gain'].sort()
        self.pingdata = self.pingdata[:num7028]
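# The 7028 -> 7000 matching step in isolation: find the 7000 record whose
# timestamp equals the current snippet packet's time. pl.find is the old
# pylab helper; numpy's flatnonzero does the same job. Toy timestamps:
import numpy as np

ts7000 = np.array([100.0, 100.5, 101.0])  # 7000 record times
ts7028 = 100.5                            # current 7028 ping time
indx = np.flatnonzero(ts7000 == ts7028)
print(indx)  # [1] -- exactly one matching 7000 record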
Example No. 8
def plot_aggregate_dfs(df, metrics, descriptor):
    '''
    Plots an aggregate heatmap. Given the performance of several metrics across
    several networks, this function aggregates each metric's performance across the
    networks. It uses the geometric mean for two reasons. First, a metric's range of
    values can vary wildly across network topologies; the geometric mean (as opposed
    to the arithmetic mean) corrects for this and prevents networks where the metric
    tends to take on higher values from having a disproportionate effect. Second, for
    success-rate metrics that are [0, 1]-valued, the geometric mean strongly penalizes
    an algorithm that performs extremely poorly on any one network.
    '''
    x = df['explore'].unique()
    y = df['decay'].unique()
    matrices = []
    for i in xrange(len(metrics)):
        matrices.append(np.zeros((len(y), len(x))))
    pos = 0
    best_aggs = [float("-inf")] * len(metrics)
    best_explores = [None] * len(metrics)
    best_decays = [None] * len(metrics)
    for name, group in df.groupby(['explore', 'decay']):
        explore, decay = name
        i, j = pos % len(y), pos / len(y)  # row = decay index, col = explore index
        for k, metric in enumerate(metrics):
            val = gmean(group[metric])
            if val > best_aggs[k]:
                best_aggs[k] = val
                best_explores[k] = explore
                best_decays[k] = decay
            if explore == EXPLORE_MLE and decay == DECAY_MLE:
                pass #print metric, val
            z = matrices[k]
            z[i, j] = val
        pos += 1
        
    if pos != len(x) * len(y):
        print pos, len(x), len(y), len(x) * len(y)
    assert pos == len(x) * len(y)
        
    for k, metric in enumerate(metrics):
        title = 'aggregate_%s' % metric
        z = matrices[k]
        z = ma.masked_invalid(z)
        pylab.figure()
        cmap = 'Reds'
        vmin = 0
        vmax = pylab.nanmax(pylab.absolute(z))
        cb_label = '%s mean' % metric

        hm = pylab.pcolormesh(z, cmap=cmap, vmin=vmin, vmax=vmax, clim=(vmin, vmax))
        curr_ax = pylab.gca()
        curr_ax.axis('tight')
        cb = pylab.colorbar(hm)
        cb.set_clim(vmin, vmax)
        #cb.ax.set_ylabel('geometric mean', fontsize=20)
        cb.ax.set_ylim(bottom=0)
        pylab.tick_params(which='both', bottom='off', top='off', left='off', right='off', \
        labeltop='off', labelbottom='off', labelleft='off', labelright='off')
        #pylab.xlabel("explore probability (%0.2f - %0.2f)" % (min(x), max(x)), fontsize=20)
        #pylab.ylabel("pheromone decay (%0.2f-%0.2f)" % (min(y), max(y)), fontsize=20)
        pylab.savefig("%s_%s.png" % (title, descriptor), format="png", transparent=True, bbox_inches='tight')
        #os.system('convert %s_%s.png %s_%s.pdf' % (title, descriptor, title, descriptor))
        pylab.close()

        print metric, best_aggs[k], best_explores[k], best_decays[k]
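# Why the geometric mean for [0, 1]-valued success rates: one very poor
# network drags the aggregate down hard, unlike the arithmetic mean.
# Assuming gmean here is scipy's:
import numpy as np
from scipy.stats import gmean

rates = np.array([0.9, 0.9, 0.01])  # strong on two networks, awful on one
print(np.mean(rates))  # ~0.60 -- the arithmetic mean hides the failure
print(gmean(rates))    # ~0.20 -- the geometric mean penalizes it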