Example #1
def plot_learning_curves(num_points,
                         X_train,
                         Y_train,
                         X_test,
                         Y_test,
                         positive_class=1,
                         negative_class=0):
    train_set_sizes = [len(X_train) // k for k in range(num_points + 1, 0, -1)]
    test_errors = []
    training_errors = []
    for training_set_size in train_set_sizes:
        model = train(X_train, Y_train, training_set_size)
        test_error = evaluate(model, X_test, Y_test, positive_class,
                              negative_class)
        training_error = evaluate(model, X_train, Y_train, positive_class,
                                  negative_class)
        test_errors.append(test_error)
        training_errors.append(training_error)

    plt.plot(train_set_sizes,
             training_errors,
             'bs-',
             label='Training accuracy')
    plt.plot(train_set_sizes, test_errors, 'g^-', label='Test accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Number of training samples')
    plt.title('Augmented Logistic Regression Learning Curve')
    plt.legend(loc='lower right')
    plt.savefig('../Figures/accuracyPlotAugmented.png', dpi=100)
    plt.show()
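
Note: the `train` and `evaluate` helpers called above are not part of this snippet. A minimal sketch of what they might look like, assuming a scikit-learn logistic regression and plain accuracy scoring (the names and signatures are inferred from the call sites, so treat this as an assumption):

# Hypothetical helpers assumed by plot_learning_curves above (not from the original source).
from sklearn.linear_model import LogisticRegression

def train(X_train, Y_train, training_set_size):
    # fit on the first `training_set_size` samples only
    n = int(training_set_size)
    model = LogisticRegression()
    model.fit(X_train[:n], Y_train[:n])
    return model

def evaluate(model, X, Y, positive_class=1, negative_class=0):
    # return plain accuracy; the class arguments are kept only for signature parity
    return model.score(X, Y)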
Example #2
def plot_sdss(cat_path):
    for catfile in find_files(cat_path, "*merged+sdss.txt"):

        # for now ignore the channel 2 files
        if catfile.split('/')[-1].split('_')[1] != '1':
            continue

        print("\nreading catalog: {}".format(catfile))
        df = pd.read_table(catfile, sep=' ')

        # get rid of negative flux sources, if any
        df = df[df.flux > 0]

        # convert to magnitudes
        mags = spz_jy_to_mags(df.flux * 1e-3, 1)

        # print counts per magnitude bin
        for i in range(10, 15):
            sc = ((df.cl == 3) & (mags > i) & (mags < i + 1)).sum()
            xc = ((df.xsc == 1) & (mags > i) & (mags < i + 1)).sum()
            msg = "{}th to {}th mag: {} SDSS galaxy sources, {} 2MASS XSC sources"
            print(msg.format(i, i + 1, sc, xc))

        # print number of sources agreed upon
        agree = ((df.xsc == 1) & (df.cl == 3)).sum()
        disagree = ((df.xsc == 1) & (df.cl == 6)).sum()
        na = ((df.xsc == 1) & (df.cl == 0)).sum()
        msg = "{} 2MASS XSC sources classified as galaxies by SDSS"
        print(msg.format(agree))
        msg = "{} 2MASS XSC sources classified as stars by SDSS"
        print(msg.format(disagree))
        msg = "{} 2MASS XSC sources not matched to SDSS"
        print(msg.format(na))

        # plot normed histograms of 2MASS XSC and SDSS galaxy magnitudes
        xsc_gals = (mags > 10) & (mags < 15) & (df.xsc == 1)
        sdss_gals = (mags > 10) & (mags < 15) & (df.cl == 3)
        # mags[xsc_gals].hist(label='2MASS XSC', normed=True)
        # mags[sdss_gals].hist(label='SDSS galaxies', normed=True)
        plt.hist([mags[xsc_gals].values, mags[sdss_gals].values],
                 bins=5,
                 label=['2MASS', 'SDSS'])
        plt.xlabel('IRAC1 [mag]')
        plt.ylabel('Number Count')
        reg = catfile.split('/')[-1].split('_')[0]
        plt.title('{} Extended Sources / Galaxies'.format(reg))
        plt.legend(loc=2)
        name = '{}_2mass_xsc_vs_sdss_hist.png'.format(reg)
        outpath = '/'.join(catfile.split('/')[:-1] + [name])
        plt.savefig(outpath, dpi=100)
        plt.close()
        print("created file: {}".format(outpath))
Example #4
def submit_time_histogram(arr):
    """
    Use Matplotlib to plot a normalized histogram of submit times
    """
    from math import ceil, log
    import numpy as np
    from scipy.stats import bayes_mvs, gaussian_kde
    try:
        import matplotlib.mlab as mlab
        from prettyplotlib import plt
    except ImportError:
        print(
            'You must have Matplotlib and Prettyplotlib installed to plot a histogram.'
        )
        return

    # Use Sturges' formula for number of bins: k = ceiling(log2 n + 1)
    k = ceil(log(len(arr), 2) + 1)
    n, bins, patches = plt.hist(arr,
                                k,
                                normed=1,
                                facecolor='green',
                                alpha=0.75)
    # throw a PDF plot on top of it
    #y = mlab.normpdf(bins, np.mean(arr), np.std(arr))
    #l = plt.plot(bins, y, 'r--', linewidth=1)

    # Get a Bayesian confidence interval for mean, variance, standard deviation
    dmean, dvar, dsd = bayes_mvs(arr)

    # drop a line in at the mean for fun
    plt.axvline(dmean[0], color='blue', alpha=0.5)
    plt.axvspan(dmean[1][0], dmean[1][1], color='blue', alpha=0.5)
    plt.axvline(np.median(arr), color='y', alpha=0.5)

    # Calculate a Kernel Density Estimate
    density = gaussian_kde(arr)
    xs = np.arange(0., np.max(arr), 0.1)
    density.covariance_factor = lambda: .25
    density._compute_covariance()
    plt.plot(xs, density(xs), color='m')

    #FIXME: come up with better legend names
    #plt.legend(('Normal Curve', 'Mean', 'Median', 'KDE'))
    plt.legend(('Mean', 'Median', 'KDE'))

    plt.xlabel('Submit Times (in Seconds)')
    plt.ylabel('Probability')
    plt.title('Histogram of Worker submit times')
    plt.grid(True)

    plt.show()
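Note: `scipy.stats.bayes_mvs` returns three `(center, (lower, upper))` results, one each for the mean, variance, and standard deviation, which is why the code above reads `dmean[0]` for the point estimate and `dmean[1][0]` / `dmean[1][1]` for the interval bounds. A small standalone illustration with synthetic data:

# Illustration of the bayes_mvs return structure used above (synthetic data).
import numpy as np
from scipy.stats import bayes_mvs

sample = np.random.normal(loc=5.0, scale=2.0, size=200)
dmean, dvar, dsd = bayes_mvs(sample, alpha=0.9)
print(dmean[0])   # point estimate of the mean
print(dmean[1])   # (lower, upper) bounds of the 90% credible interval
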
def layerActivations(model, data, labels):
    print('Visualizing activations with tSNE...')

    if not os.path.exists(cc.cfg['plots']['layer_activations_dir']):
        os.makedirs(cc.cfg['plots']['layer_activations_dir'])


    numLabels = cc.cfg['plots']['layer_activations_label_cap']

    data = data[:cc.cfg['plots']['layer_activations_points_cap']]
    labels = labels[:numLabels,:cc.cfg['plots']['layer_activations_points_cap']]

    subplotCols = numLabels
    subplotRows = len(model.layers)-1
    subplotIdx = 1

    plt.figure(figsize=(5*subplotCols,5*subplotRows))

    for i in range(1,len(model.layers)):
        print('Running tSNE for layer {}/{}'.format(i+1,len(model.layers)))

        func = K.function([model.layers[0].input], [model.layers[i].output])
        out = func([data])[0]

        tsneModel = TSNE(n_components = 2, random_state = 0)
        tsneOut = tsneModel.fit_transform(out).T

        # labeledTsneOut = np.hstack((tsneOut, labels[0].reshape(-1,1)))

        for j in range(numLabels):
            plt.subplot(subplotRows, subplotCols, subplotIdx)
            plt.title('{} / {}'.format(model.layers[i].name,cc.exp['params']['data']['labels'][j]))
            plt.scatter(tsneOut[0],tsneOut[1],c=labels[j],cmap = 'plasma')

            subplotIdx += 1

        # tsneDF = pd.DataFrame(labeledTsneOut, columns = ('a', 'b', 'c'))
        # plot = tsneDF.plot.scatter(x = 'a', y = 'b', c = 'c', cmap = 'plasma')


    plt.tight_layout()
    plt.savefig('{}/activations.png'.format(cc.cfg['plots']['layer_activations_dir']))
    plt.close()

    print('...done')
def histograms(modelLogger):
    if not cc.cfg['plots']['histograms']:
        return

    if not os.path.exists(cc.cfg['plots']['histograms_dir']):
        os.makedirs(cc.cfg['plots']['histograms_dir'])

    cntEpochs = len(modelLogger.epochLogs)
    cntLayers = len(modelLogger.epochLogs[-1]['weights'])

    logVals = [
        {'name':'weights','color':'blue'},
        {'name':'updates','color':'red'},
        {'name':'ratios','color':'green'}
    ]

    subplotRows = len(logVals)
    subplotCols = cntLayers

    for x in range(cntEpochs):
        subplotIdx = 1

        plt.figure(figsize=(5*subplotCols,5*subplotRows))
        plt.suptitle('Histograms per layer, epoch {}/{}'.format(x,cntEpochs-1), fontsize=14)

        for logVal in logVals:
            for i,layer in enumerate(modelLogger.epochLogs[x][logVal['name']]):
                histmin = layer.min()
                histmax = layer.max()

                plt.subplot(subplotRows, subplotCols, subplotIdx)
                plt.title('{}, {}'.format(modelLogger.model.layers[modelLogger.loggedLayers[i]].name,logVal['name']))
                plt.hist(layer, range = (histmin, histmax), bins = 30, color = logVal['color'])

                subplotIdx+=1

        plt.tight_layout()
        plt.subplots_adjust(top=0.9)
        plt.savefig('{}/hist_{e:03d}.png'.format(cc.cfg['plots']['histograms_dir'], e = x))
        plt.close()
Example #7
    def hero_stats_bar(self, thresh):
        '''
        Creates a bar chart of the heroes with highest
        Wins / #Games played ratio
        '''
        # Create individual hero wins and losses
        self.hero_stats()

        fig, ax = plt.subplots(1, figsize=(9, 7))
        # Compute the ratio of #Wins to the #Games played by that hero
        # Most relevant statistic, better than W/L Ratio, better than
        # just wins, all of them can be statistically insignificant
        # in edge cases, but this can be the least of all
        val = [
            (k, self.wstat[k] / float(self.dstat[k] + self.wstat[k]))
            for k in self.wstat
            if self.wstat[k] / float(self.dstat[k] + self.wstat[k]) >= thresh
        ]
        plt.title('Hero ID vs. Win Ratio (Matches from 01/23 - 02/24)')
        plt.xlabel('Hero ID')
        plt.ylabel('Win Ratio')
        ax.set_xlim([0, len(val)])
        ann = [round(k[1], 2) for k in val]
        # Extract the xticklabels
        xtl = [k[0] for k in val]
        # Extract the individual values to be plotted
        val = [k[1] for k in val]
        ppl.bar(ax,
                np.arange(len(val)),
                val,
                annotate=ann,
                xticklabels=xtl,
                grid='y',
                color=ppl.colors.set2[2])
        fig.savefig('../Figures/HIDvs#Wins#Games.png')
        plt.show()
        plt.clf()
                for x in range(count):
                    data[locat].append(value)

print("data")
for i in data:
    print(i[:10])
print("data")

#np.random.seed(10)

#data = np.random.randn(8, 4)

u_labels = ['1e-7 mid','1e-7 end','1e-6 mid','1e-6 end','1e-5 mid','1e-5 end','1e-4 mid','1e-4 end','0.001 mid','0.001 end','0.005 mid','0.005 end','0.01 mid','0.01 end',\
        '0.05 mid', '0.05 end','0.1 mid','0.1 end']

fig, ax = plt.subplots()

ax.set_xticks(range(1, len(u_labels) + 1))
ax.set_xticklabels(u_labels)
ax.set_xlabel('Initial Mutation Rates', fontsize=9)
ax.set_ylabel('End Proliferation Rates', fontsize=9)
i = 0
print(ax.get_xticklabels())
ppl.boxplot(ax, data)  #,xticklabels=u_labels)
plt.title("Distribution of End Proliferation Rates by Initial Mutation Rate",
          fontsize=9)
fig.savefig(opt.output + '_boxplot.png')
print(opt.output + "DONE")
#ppl.hist(ax,data)
#fig.savefig('histogram_prettyplotlib_default.png')
Example #9
alphas = data[2]
eps = data[0]
delta = 0.1

string = [r'$\tau\delta$ = %.2f' % float(delta*t) for t in taus]
#string2 = [r'$\tau$ = '+str(data[3][i]) for i in range(5)]
maxmmse = np.max(eps)
#maxf = np.max(data[1])
def f(x):
    ppl.plot(alphas,eps[:,x]/maxmmse,label=string[x],ax=ax1)
#	ax2.plot(data[2],data[1][:,x]/maxf,label=string2[x])
#plt.close()
#plt.figure()
#ax1 = plt.gcf().add_subplot(1,1,1)
fig, ax1 = ppl.subplots(1)
plt.title(r'MMSE for a Reconstruction Task with Adaptive Neurons')
#ax2 = plt.gcf().add_subplot(1,2,2)
#plt.title(r'Firing rate of adaptive code')
map(f,range(0,10,2))
ppl.legend(ax1)
#ax2.legend()
ax1.set_xlabel(r'$\alpha$')
ax1.set_ylabel(r'MMSE')
#ax2.set_xlabel(r'$\alpha$')
#ax2.set_ylabel(r'Firing Rate')
#plt.xlabel(r'Tuning width $\alpha$')
#plt.ylabel(r'MMSE')
plt.show()
#plt.savefig("mmse_discrimination.png",dpi=600)
plt.savefig("figure_5_8.png",dpi=600)
plt.savefig("figure_5_8.eps")
            y = eta**2/(2*gamma)
            print nd, " of ",len(deltas)
            for nphi,phi in enumerate(phis):
                fun = lambda x: f(x,gamma,eta,phi,alpha,thetas,tau,delta)
                y = opt.fsolve(fun, y)
                epss[nd,nphi] = y/(eta**2/(2*gamma))
                ls[nd,nphi] = lambdabar(thetas,gamma,eta,phi,alpha,tau,delta)
    
    strings = [r'$\tau\delta$ = '+str(i*tau) for i in deltas]
    
    #f = lambda i : plt.plot(alphas,epss[i,:],label=strings[i])
    #map(f,range(len(deltas)))
    fig, ax = ppl.subplots(1,figsize=(7,7))
    ppl.plot(ls[0,:],epss[0,:],label = strings[0], ax=ax)
    ppl.plot(ls[1,:],epss[1,:],label = strings[1], ax=ax)
    ppl.plot(ls[2,:],epss[2,:],label = strings[2], ax=ax)
    ppl.plot(ls[3,:],epss[3,:],label = strings[3], ax=ax)
    ppl.plot(ls[4,:],epss[4,:],label = strings[4], ax=ax)
    
    np.savez("../data/mean_field_ratedist.npz", eps = epss, ls=ls, phis = phis, deltas = deltas, tau = tau)
   
    ax.set_xlim([0.0,25.0])
    ax.set_xlabel(r'$\lambda$ in spikes/sec')
    ax.set_ylabel(r'MMSE/$\epsilon_0$')
    plt.title("Rate-Distortion Curve for Adaptive Neurons")
    ppl.legend(ax)
    plt.savefig("rate_distortion_mf_alpha.png")
    plt.savefig("rate_distortion_mf_alpha.eps")
    plt.show()

observation = pm.Poisson("obs", lambda_, value=datos, observed=True)
model = pm.Model([observation, lambda_1, lambda_2, tau])
mcmc = pm.MCMC(model)
mcmc.sample(50000, 10000, 1)

lambda_1_samples = mcmc.trace('lambda_1')[:]
lambda_2_samples = mcmc.trace('lambda_2')[:]

tau_samples = mcmc.trace('tau')[:]

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1)

plt.rc('font', **{'family': 'DejaVu Sans'})
plt.subplot(311)
plt.title(u'''Distribución posterior de las variables
            $\lambda_1,\;\lambda_2,\;tau$''')
plt.hist(lambda_1_samples, histtype="stepfilled", bins=30, alpha=0.85,
        normed=True)
plt.xlim([150000,250000])
plt.xlabel("valor de $\lambda_1$")


plt.subplot(312)
#ax.set_autoscaley_on(False)
plt.hist(lambda_2_samples, histtype="stepfilled", bins=30, alpha=0.85,
        normed=True)
plt.xlim([150000,250000])
plt.xlabel("valor de $\lambda_2$")
plt.tick_params(axis="both", which="major", labelsize=4)

plt.subplot(313)
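
Note: the snippet above assumes that `datos`, `lambda_1`, `lambda_2`, `tau`, and `lambda_` were defined earlier in the script. A minimal sketch of those definitions in the PyMC2 style this code uses (the priors and the stand-in data below are assumptions, not taken from the original):

# Hypothetical PyMC2 model pieces assumed by the snippet above.
import numpy as np
import pymc as pm

datos = np.random.poisson(200000, size=100)   # stand-in for the real observed counts
n = len(datos)
alpha = 1.0 / np.mean(datos)

lambda_1 = pm.Exponential("lambda_1", alpha)            # rate before the switchpoint
lambda_2 = pm.Exponential("lambda_2", alpha)            # rate after the switchpoint
tau = pm.DiscreteUniform("tau", lower=0, upper=n - 1)   # switchpoint index

@pm.deterministic
def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
    out = np.zeros(n)
    out[:tau] = lambda_1   # rate lambda_1 up to the switchpoint
    out[tau:] = lambda_2   # rate lambda_2 afterwards
    return out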
Example #12
for a, b in zip(midd, data):
    new_data.append(a)
    new_data.append(b)

print(new_data[:1])

u_labels = [
    '1e-7', '1e-7 end', '1e-6', '1e-6 end', '1e-5', '1e-5 end', '1e-4',
    '1e-4 end', '0.001', '0.001 end', '0.005', '0.005 end', '0.01', '0.01 end'
]

fig, ax = plt.subplots()

ax.set_xticklabels(u_labels, rotation='vertical', fontsize=9)
#ax.set_xticks(u_labels)
ax.set_yscale('log')
ax.set_xlabel('Initial Mutation Rates', fontsize=9)
ax.set_ylabel(opt.ty + ' Proliferation Rates', fontsize=9)
i = 0
print(ax.get_xticklabels())
ppl.boxplot(ax, new_data)  #,xticklabels=u_labels)

plt.title("Distribution of " + opt.ty +
          " proliferation rates categorised by initial mutation rate",
          fontsize=9)
fig.savefig(opt.output + 'pro_boxplot.png')
print(opt.output + "DONE")
#ppl.hist(ax,data)
#fig.savefig('histogram_prettyplotlib_default.png')
Example #13
ax1.plot(times,s,'r',label = 'True State')

#m = np.average(particles,weights=weights,axis=1)
#st = np.std(particles,weights=weights,axis=1)
#ext = (0.0,dt*timewindow,code.neurons[-1].theta,code.neurons[0].theta)
#plt.imshow(rates.T,extent=ext,cmap = cm.gist_yarg,aspect = 'auto',interpolation ='nearest')
thetas = [code.neurons[i].theta for i in sptrain]
ax1.plot(times[sptimes],thetas,'yo',label='Observed Spikes')
ax1.plot(times,m,'b',label='Posterior Mean')
ax1.plot(times,m-st,'gray',times,m+st,'gray')
#ax2 = plt.gcf().add_subplot(1,2,2)
#ax2.plot(times,s)
plt.xlabel('Time (in seconds)')
plt.ylabel('Space (in cm)')
plt.legend()
plt.title('State Estimation in a Diffusion System')



if plotting:
    
    #matplotlib.rcParams['font.size']=10
    
    if gaussian:
        fig, (ax1,ax2) = ppl.subplots(1,2,figsize = (12,6))
    else:
        fig, ax2 = ppl.subplots(1)
    times = np.arange(0.0,dt*timewindow,dt)
    if gaussian:    
        if sum(sum(spsg)) !=0:
            (ts,neurs) = np.where(spsg == 1)
Example #14
for ftype, fsize in size.items():
  sys.stderr.write('{} files contain {} bytes over {} files\n'.format(ftype.upper(), sum(fsize), len(files[ftype])))
###
# To upload to the correct spot on S3
# gzip *.paths
# s3cmd put --acl-public *.paths.gz s3://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-YYYY-WW/

###
# Plot
for ftype, fsize in size.items():
  if not fsize:
    continue
  plt.hist(fsize, bins=50)
  plt.xlabel('Size (bytes)')
  plt.ylabel('Count')
  plt.title('Distribution for {}'.format(ftype.upper()))
  plt.savefig(prefix + '{}_dist.pdf'.format(ftype))
  #plt.show(block=True)
  plt.close()

###
# Find missing WAT / WET files
warc = set([x.strip() for x in open(prefix + 'warc.paths').readlines()])
wat = [x.strip() for x in open(prefix + 'wat.paths').readlines()]
wat = set([x.replace('.warc.wat.', '.warc.').replace('/wat/', '/warc/') for x in wat])
# Work out the missing files and segments
missing = sorted(warc - wat)
missing_segments = defaultdict(list)
for fn in missing:
  start, suffix = fn.split('/warc/')
  segment = start.split('/')[-1]
Example #15
def run_xsc_phot(bcdphot_out_path, mosaic_path):
	replaced = {}
	for cat in find_files(bcdphot_out_path, "*_combined_hdr_catalog.txt"):

		print("\n======================================================")
		print("\nadjusting photometry in: {}".format(cat.split('/')[-1]))
		print("------------------------------------------------------")
		outpath = cat.replace('combined_hdr_catalog.txt','2mass_xsc.tbl')

		# retrieve 2mass data if file doesn't already exist (from previous run)
		if not os.path.isfile(outpath):
			# get url and retrieve data
			url = query_2mass_xsc_polygon(*get_region_corners(cat))
			print("\ndownloading 2MASS photometry from: {}".format(url))
			text = urllib2.urlopen(url).read()
			# write to disk
			with open(outpath, 'w') as f:
				f.write(text)
			print("\ncreated file: {}".format(outpath))

		# read back in as recarray	
		print("\nreading: {}".format(outpath))
		names = open(outpath).read().split('\n')[76].split('|')[1:-1]
		da = np.recfromtxt(outpath, skip_header=80, names=names)

		# write input file for xsc_phot.pro
		infile_outpath = '/'.join(cat.split('/')[:-1])+'/xsc.txt'
		with open(infile_outpath,'w') as w:
			for i in range(da.shape[0]):
				w.write("{} {} {} {}\n".format(da.designation[i], da.ra[i], da.dec[i], da.r_ext[i]))
		print("\ncreated input file for xsc_phot.pro: {}".format(infile_outpath))

		# locate the FITS mosaic file for xsc_phot.pro to do photometry on
		reg, ch = cat.split('/')[-1].split('_')[:2]
		mosaicfile = filter(lambda x: 'dirbe{}/ch{}/long/full/Combine'\
			.format(reg,ch) in x, find_files(mosaic_path, '*mosaic.fits'))[0]
		print("\nfound mosaic file: {}".format(mosaicfile))

		# spawn IDL subprocess running xsc_phot.pro and catch stdout in file
		outpath = infile_outpath.replace('xsc.txt', 'xsc_phot_out.txt')
		if not os.path.isfile(outpath):
			outfile = open(outpath,'w')
			print("\nspawning xsc_phot.pro IDL subprocess")
			cmd = "xsc_phot,'"+mosaicfile+"','"+infile_outpath+"','long'"
			rc = subprocess.call(['/usr/local/itt/idl71/bin/idl','-quiet','-e',cmd], 
				stderr = subprocess.PIPE, stdout = outfile)
			outfile.close()

		# read in output to recarray
		print("\nreading: {}".format(outpath))
		phot = np.recfromtxt(outpath, names=['id','flux','unc','sky','skyunc'])

		# make sure rows are aligned
		assert (da.designation == phot.id).all()

		# ignore xsc sources we got a NaN or negative flux for
		bad = np.isnan(phot.flux) | (phot.flux < 0)
		print("\naper.pro returned NaN or negative flux for {} sources".format(bad.sum()))
		if bad.sum() > 0:
			for i in phot[bad].id:
				print(i)
			outpath = cat.replace('combined_hdr_catalog.txt','xsc_nan_phot.csv')
			with open(outpath,'w') as f:
				w = csv.writer(f)
				w.writerow(da.dtype.names)
				w.writerows(da[bad].tolist())
			print('\ncreated file: {}'.format(outpath))
		phot = phot[~bad]
		da = da[~bad]

		# read in pipeline catalog
		print("\nreading: {}".format(cat))
		names = open(cat).readline().split()[1:]
		c = np.recfromtxt(cat, names=names)

		# loop through xsc sources and find matches in pipeline catalog
		print("\nfinding records associated with XSC sources in pipeline catalog")
		c_flux_total = []
		n_in_aper = []
		c_idx = []
		coords = radec_to_coords(c.ra, c.dec)
		kdt = KDT(coords)
		for i in range(phot.size):
			radius = da.r_ext[i]/3600.
			# idx1, idx2, ds = spherematch(da.ra[i], da.dec[i], 
			# 	c.ra, c.dec, tolerance=radius)
			idx, ds = spherematch2(da.ra[i], da.dec[i], c.ra, c.dec,
				kdt, tolerance=radius, k=500)
			# c_flux_total.append(c.flux[idx2].sum())
			# n_in_aper.append(c.flux[idx2].size)
			# c_idx.append(idx2.tolist())
			c_flux_total.append(c.flux[idx].sum())
			n_in_aper.append(ds.size)
			c_idx.append(idx.tolist())
		print("\nhistogram of source counts in r_ext aperture")
		for i in [(i,n_in_aper.count(i)) for i in set(n_in_aper)]:
			print i

		# create new version of catalog file with xsc-associated entries replaced
		c_idx = np.array(flatten(c_idx))
		print("\nremoving {}, adding {}".format(c_idx.size, phot.size))
		replaced[cat] = {'old':c_idx.size, 'new':phot.size}
		replaced[cat]['hist'] = [(i,n_in_aper.count(i)) for i in set(n_in_aper)]
		c = np.delete(c, c_idx)
		newrows = np.rec.array([(-i, da.ra[i], da.dec[i], 
			phot.flux[i], phot.unc[i], 1) for i in \
			range(phot.size)], dtype=c.dtype)
		newcat = np.hstack((c, newrows))

		# write new version of catalog to disk
		fmt = ['%i']+['%0.8f']*2+['%.4e']*2+['%i']
		outpath = cat.replace('catalog.txt', 'catalog_xsc_cor.txt')
		np.savetxt(outpath, newcat, fmt = fmt, header = ' '.join(names))
		print('\ncreated file: {}'.format(outpath))

		# make plot of total old vs. new flux
		plt.scatter(c_flux_total, phot.flux)
		ylim = plt.gca().get_ylim()
		plt.xlim(*ylim)
		max_y = ylim[1]
		plt.plot(ylim, ylim, 'r-')
		plt.xlabel('old flux [mJy]')
		plt.ylabel('new flux [mJy]')
		name = ' '.join(cat.split('/')[-1].split('_')[:2])
		plt.title(name)
		outpath = cat.replace('combined_hdr_catalog.txt','xsc_new_vs_old_phot.png')
		plt.savefig(outpath, dpi=200)
		plt.close()
		print('\ncreated file: {}'.format(outpath))

	outfile = 'xsc_replaced.json'
	json.dump(replaced, open(outfile,'w'))
	print("\ncreated file: {}".format(outfile))
	print("\nremoved / added")
	for k,v in replaced.iteritems():
		print k.split('/')[-1], v['old'], v['new']
	m = np.mean([i['old']/float(i['new']) for i in replaced.values()])
	print("average ratio: {}".format(m))
	print("\nK mag and r_ext of sources with NaN photometry:")
	for i in find_files(bcdphot_out_path, "*xsc_nan_phot.csv"):
		reg = i.split('/')[-1]
		rec = np.recfromcsv(i)
		bad_id = rec.designation.tolist()
		bad_k = rec.k_m_k20fe.tolist()
		bad_r_ext = rec.r_ext.tolist()
		print reg
		print ("\tid\t\t\tKmag\tr_ext")
		if type(bad_id) is list:
			seq = sorted(zip(bad_id, bad_k, bad_r_ext), key=lambda x: x[0])
			for j,k,l in seq:
				print("\t{}\t{}\t{}".format(j,k,l))
		else:
			print("\t{}\t{}\t{}".format(bad_id, bad_k, bad_r_ext))
Example #16
           X.append(i + 1)
           Y.append(err)
           incr_orthoY.append(check_orthogonality(uL[i]))
       incr_ortho.append(['iSVD u={}'.format(num), X, incr_orthoY])
       plt.plot(X, Y, label='iSVD u={}'.format(num))
   """
 print 'Testing raw SVD => exact reconstruction'
 svT = scipy.linalg.diagsvd(s, u.shape[0], vT.shape[1]).dot(vT)
 for y in xrange(train.shape[0]):
   for x in xrange(train.shape[1]):
     colU = u[y, :]
     rowV = svT[:, x]
     assert np.allclose(train[y, x], single_dot(u, svT, x, y))
 """
   ##
   plt.title('SVD reconstruction error on {}x{} matrix'.format(*train.shape))
   plt.xlabel('Low rank approximation (k)')
   plt.ylabel('Frobenius norm')
   plt.ylim(0, max(svdY))
   plt.legend(loc='best')
   plt.savefig('reconstruct_fro_{}x{}.pdf'.format(*train.shape))
   plt.show(block=True)
   ##
   plt.plot(orthoX,
            orthoY,
            label="SVD",
            color='black',
            linewidth=2,
            linestyle='--')
   for label, X, Y in incr_ortho:
       plt.plot(X, Y, label=label)
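Note: `check_orthogonality` is not defined in this fragment; a common choice is to measure how far U^T U deviates from the identity. A minimal sketch under that assumption:

# Hypothetical helper assumed by the SVD fragment above.
import numpy as np

def check_orthogonality(U):
    # maximum absolute deviation of U^T U from the identity matrix
    gram = U.T.dot(U)
    return np.abs(gram - np.eye(gram.shape[0])).max()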
Example #17
width = 0.5

font = {'family' : 'normal',
        'weight' : 'normal',
        'size'   : 10}

#          k      c      b      m       r       o        char
vals = [ 45174 , 48810 , 41719 , 255017 , 642090 , 3053979 , 3066135 , 1731655 , 582226 , 126045 , 6738 ]
total = float(sum(vals))
print(vals)
vals = [(i / total)*100 for i in vals]
print(vals)
colors = ['LightGray','Gray','LightSkyBlue','RoyalBlue','LightSeaGreen','MediumSeaGreen','Khaki',\
        'Goldenrod','Tomato','MediumVioletRed','DarkViolet']

#plt.bar(ind, vals, width)
ppl.bar(ax, ind, vals, color=colors)
plt.xticks(ind+width/2.,\
        ('1e-7','1e-6','1e-5','1e-4','0.001','0.005','0.01', '0.05', '0.1', '0.2', '0.5')\
        ,fontsize=11)
plt.yticks(fontsize=11)
plt.title("Distribution of cells by mutation rate across resistant populations",fontsize=11)
#plt.ylim((0,100))
plt.ylabel("Percent of Total",fontsize=11)
plt.xlabel("Initial Mutation rate: u",fontsize=11)
plt.savefig(opt.filename+"_bar")
plt.clf()


###
Example #18
  svdY = []
  u, s, vT = scipy.linalg.svd(train)
  for k in xrange(1, 100):
    low_s = [s[i] for i in xrange(k)]  # + (min(u.shape[0], vT.shape[1]) - k) * [0]
    print 'Exact SVD with low-rank approximation {}'.format(k)
    svdX.append(k)
    svdY.append(get_error(u, np.diag(low_s), vT, train, test))
  plt.plot(svdX, svdY, label="SVD", color='black', linewidth='2', linestyle='--')
  """

    print
    print 'Testing incremental SVD'
    for num in xrange(400, 1001, 300):
        print '... with block size of {}'.format(num)
        X, Y = [], []
        for k in xrange(1, 91, 10):
            print k
            u, s, vT = incremental_SVD(train, k, num)
            X.append(k)
            Y.append(get_error(u, s, vT, train, test, prod_avg))
        plt.plot(X, Y, label='iSVD u={}'.format(num))
    ##
    plt.title(
        'Recommendation system RMSE on {}x{} matrix'.format(*train.shape))
    plt.xlabel('Low rank approximation (k)')
    plt.ylabel('Root Mean Squared Error')
    #plt.ylim(0, max(svdY))
    plt.legend(loc='best')
    plt.savefig('recommend_rmse_{}x{}.pdf'.format(*train.shape))
    plt.show(block=True)
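Note: `incremental_SVD` and `get_error` come from the surrounding script and are not shown. A minimal RMSE-style sketch of `get_error` consistent with how it is called above (the zero-means-missing convention and the `prod_avg` un-centering step are assumptions):

# Hypothetical error helper assumed by the SVD snippets (not from the original source).
import numpy as np

def get_error(u, s, vT, train, test, prod_avg=None):
    # RMSE of the rank-k reconstruction against the observed (non-zero) test entries
    s = np.asarray(s)
    if s.ndim == 1:                    # a plain vector of singular values
        s = np.diag(s)
    k = s.shape[0]
    recon = u[:, :k].dot(s).dot(vT[:k, :])
    if prod_avg is not None:
        recon = recon + prod_avg       # undo per-product mean centering, if it was applied
    mask = test != 0                   # assume zero marks an unobserved rating
    return np.sqrt(np.mean((test[mask] - recon[mask]) ** 2))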
Example #19
observation = pm.Poisson("obs", lambda_, value=datos, observed=True)
model = pm.Model([observation, lambda_1, lambda_2, tau])
mcmc = pm.MCMC(model)
mcmc.sample(50000, 10000, 1)

lambda_1_samples = mcmc.trace('lambda_1')[:]
lambda_2_samples = mcmc.trace('lambda_2')[:]

tau_samples = mcmc.trace('tau')[:]

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1)

plt.rc('font', **{'family': 'DejaVu Sans'})
plt.subplot(311)
plt.title(u'''Distribución posterior de las variables
            $\lambda_1,\;\lambda_2,\;tau$''')
plt.hist(lambda_1_samples,
         histtype="stepfilled",
         bins=30,
         alpha=0.85,
         normed=True)
plt.xlim([150000, 250000])
plt.xlabel("valor de $\lambda_1$")

plt.subplot(312)
#ax.set_autoscaley_on(False)
plt.hist(lambda_2_samples,
         histtype="stepfilled",
         bins=30,
         alpha=0.85,
         normed=True)
##
import sys

if __name__ == '__main__':
  # Accumulate the counts fed into standard in (stdin)
  counts = []
  for line in sys.stdin:
    topic, count = line.split()
    # Shift the ones up by a tiny amount to allow them to be visible on the graph
    counts.append(int(count) + 0.05)

  print 'Total page view counts: {}'.format(len(counts))

  # Display the hourly page view distribution on a log-log plot
  # This matches our friendly and well understood Zipf distribution
  fig = plt.figure(figsize=(9, 5))
  ax = fig.add_subplot(1, 1, 1)
  plt.plot(xrange(len(counts)), counts, linewidth=3, label='Pageviews per page')
  #
  ax.set_xscale('log')
  ax.set_yscale('log')
  #
  plt.title('Log-log plot of hourly Wikipedia page view distribution')
  plt.xlabel('Rank order')
  plt.ylabel('Frequency')
  plt.grid(color='black')
  plt.legend()
  #
  plt.savefig('hourly_wikipedia_zipf.pdf')
  plt.show(block=True)
Example #22
    #data = np.random.randn(8, 4)

    u_labels = [
        '1e-7', '1e-6', '1e-5', '1e-4', '0.001', '0.005', '0.01', '0.05', '0.1'
    ]

    fig, ax = plt.subplots()

    ax.set_xticks(range(1, len(u_labels) + 1))
    ax.set_xticklabels(u_labels)
    ax.set_xlabel('Initial Mutation Rates', fontsize=9)
    ax.set_ylabel('End Time', fontsize=9)
    ax.set_yscale('log')
    i = 0
    print(ax.get_xticklabels())
    ppl.boxplot(ax, data)  #,xticklabels=u_labels)

    t = [
        "time taken to reach full size", "time taken until crash",
        "time taken until recovery"
    ]
    tt = ["pre", "crash", "post"]

    plt.title("distribution of " + t[j] + " categorised by mutation rate",
              fontsize=9)
    fig.savefig(opt.output + tt[j] + '_boxplot.png')
    print(opt.output + "DONE")
    #ppl.hist(ax,data)
    #fig.savefig('histogram_prettyplotlib_default.png')
Example #23
     X.append(i + 1)
     Y.append(err)
     incr_orthoY.append(check_orthogonality(uL[i]))
   incr_ortho.append(['iSVD u={}'.format(num), X, incr_orthoY])
   plt.plot(X, Y, label='iSVD u={}'.format(num))
 """
 print 'Testing raw SVD => exact reconstruction'
 svT = scipy.linalg.diagsvd(s, u.shape[0], vT.shape[1]).dot(vT)
 for y in xrange(train.shape[0]):
   for x in xrange(train.shape[1]):
     colU = u[y, :]
     rowV = svT[:, x]
     assert np.allclose(train[y, x], single_dot(u, svT, x, y))
 """
 ##
 plt.title('SVD reconstruction error on {}x{} matrix'.format(*train.shape))
 plt.xlabel('Low rank approximation (k)')
 plt.ylabel('Frobenius norm')
 plt.ylim(0, max(svdY))
 plt.legend(loc='best')
 plt.savefig('reconstruct_fro_{}x{}.pdf'.format(*train.shape))
 plt.show(block=True)
 ##
 plt.plot(orthoX, orthoY, label="SVD", color='black', linewidth=2, linestyle='--')
 for label, X, Y in incr_ortho:
   plt.plot(X, Y, label=label)
 plt.title('SVD orthogonality error on {}x{} matrix'.format(*train.shape))
 plt.xlabel('Low rank approximation (k)')
 plt.ylabel('Deviation from orthogonality')
 plt.semilogy()
 #plt.ylim(0, max(orthoY))
Example #24
  svdX = []
  svdY = []
  u, s, vT = scipy.linalg.svd(train)
  for k in xrange(1, 100):
    low_s = [s[i] for i in xrange(k)]  # + (min(u.shape[0], vT.shape[1]) - k) * [0]
    print 'Exact SVD with low-rank approximation {}'.format(k)
    svdX.append(k)
    svdY.append(get_error(u, np.diag(low_s), vT, train, test))
  plt.plot(svdX, svdY, label="SVD", color='black', linewidth='2', linestyle='--')
  """

  print
  print 'Testing incremental SVD'
  for num in xrange(400, 1001, 300):
    print '... with block size of {}'.format(num)
    X, Y = [], []
    for k in xrange(1, 91, 10):
      print k
      u, s, vT = incremental_SVD(train, k, num)
      X.append(k)
      Y.append(get_error(u, s, vT, train, test, prod_avg))
    plt.plot(X, Y, label='iSVD u={}'.format(num))
  ##
  plt.title('Recommendation system RMSE on {}x{} matrix'.format(*train.shape))
  plt.xlabel('Low rank approximation (k)')
  plt.ylabel('Root Mean Squared Error')
  #plt.ylim(0, max(svdY))
  plt.legend(loc='best')
  plt.savefig('recommend_rmse_{}x{}.pdf'.format(*train.shape))
  plt.show(block=True)