Example #1
 def atest_very_anisotropic(self):  # leading 'a' keeps the test runner from picking this up
     """ Made to find error in very anisotropic case close to upper layer
     """
     set_up_parameters = {
         "sigma_G": [1.0, 1.0, 1.0],  # Below electrode
         "sigma_T": [0.1, 0.1, 1.0],  # Tissue
         "sigma_S": [0.0, 0.0, 0.0],  # Saline
         "slice_thickness": 200,
         "steps": 2,
     }
     Moi = MoI(set_up_parameters=set_up_parameters)
     imem = 1.2
     a = set_up_parameters["slice_thickness"] / 2.0
     high_position = [0, 0, 90]
     low_position = [0, 0, -a + 10]
     x_array = np.linspace(-200, 200, 41)
     y_array = np.linspace(-100, 100, 21)
     values_high = []
     values_low = []
     for y in y_array:
         for x in x_array:
             values_high.append([x, y, Moi.ad_hoc_anisotropic(charge_pos=high_position, elec_pos=[x, y, -100])])
             values_low.append([x, y, Moi.ad_hoc_anisotropic(charge_pos=low_position, elec_pos=[x, y, -100])])
     values_high = np.array(values_high)
     values_low = np.array(values_low)
     pl.subplot(211)
     pl.scatter(values_high[:, 0], values_high[:, 1], c=values_high[:, 2])
     pl.axis("equal")
     pl.colorbar()
     pl.subplot(212)
     pl.scatter(values_low[:, 0], values_low[:, 1], c=values_low[:, 2])
     pl.colorbar()
     pl.axis("equal")
     pl.show()
Example #2
def test1():
    import numpy as np
    import pylab
    from scipy import sparse

    from regreg.algorithms import FISTA
    from regreg.atoms import l1norm
    from regreg.blocks import blockwise  # assumed import; `blockwise` is called below
    from regreg.container import container
    from regreg.smooth import quadratic

    Y = np.random.standard_normal(500); Y[100:150] += 7; Y[250:300] += 14

    sparsity = l1norm(500, lagrange=1.0)
    #Create D
    D = (np.identity(500) + np.diag([-1]*499,k=1))[:-1]
    D = sparse.csr_matrix(D)

    fused = l1norm.linear(D, lagrange=19.5)
    loss = quadratic.shift(-Y, lagrange=0.5)

    p = container(loss, sparsity, fused)
    
    soln1 = blockwise([sparsity, fused], Y)

    solver = FISTA(p)
    solver.fit(max_its=800,tol=1e-10)
    soln2 = solver.composite.coefs

    #plot solution
    pylab.figure(num=1)
    pylab.clf()
    pylab.scatter(np.arange(Y.shape[0]), Y, c='r')
    pylab.plot(soln1, c='y', linewidth=6)
    pylab.plot(soln2, c='b', linewidth=2)
Example #3
def window_fn_matrix(Q,N,num_remov=None,save_tag=None,lms=None):
    Q = n.matrix(Q); N = n.matrix(N)
    Ninv = uf.pseudo_inverse(N,num_remov=None) # XXX want to remove dynamically
    #print Ninv
    info = n.dot(Q.H,n.dot(Ninv,Q))
    M = uf.pseudo_inverse(info,num_remov=num_remov)
    W = n.dot(M,info)

    if save_tag is not None:
        foo = W[0,:]
        foo = n.real(n.array(foo))
        foo.shape = (foo.shape[1],)
        print(foo.shape)
        p.scatter(lms[:,0],foo,c=lms[:,1],cmap=mpl.cm.PiYG,s=50)
        p.xlabel('l (color is m)')
        p.ylabel('W_0,lm')
        p.title('First Row of Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W.pdf'.format(fig_loc,save_tag))
        p.clf()

        print('W ', W.shape)
        p.imshow(n.real(W))
        p.title('Window Function Matrix')
        p.colorbar()
        p.savefig('{0}/{1}_W_im.pdf'.format(fig_loc,save_tag))
        p.clf()


    return W
Example #4
def generate_pr_scatter_plots(
    query_prf, subject_prf, query_color="b", subject_color="r", x_label="Precision", y_label="Recall"
):
    """ Generate scatter plot of precision versus recall for query and subject results
        
        query_prf: precision, recall, and f-measure values as returned 
         from compute_prfs for query data
        subject_prf: precision, recall, and f-measure values as returned 
         from compute_prfs for subject data
        query_color: the color of the query points (default: blue)
        subject_color: the color of the subject points (default: red)
        x_label: x axis label for the plot (default: "Precision")
        y_label: y axis label for the plot (default: "Recall")
    
    """

    # Extract the query precisions and recalls and
    # generate a scatter plot
    query_precisions = [e[4] for e in query_prf]
    query_recalls = [e[5] for e in query_prf]
    scatter(query_precisions, query_recalls, c=query_color)

    # Extract the subject precisions and recalls and
    # generate a scatter plot
    subject_precisions = [e[4] for e in subject_prf]
    subject_recalls = [e[5] for e in subject_prf]
    scatter(subject_precisions, subject_recalls, c=subject_color)

    xlim(0, 1)
    ylim(0, 1)
    xlabel(x_label)
    ylabel(y_label)
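A minimal usage sketch (assumes the module does `from pylab import *`, which supplies the scatter/xlim/xlabel calls above; the prf rows are made up, with precision and recall at indices 4 and 5 as the docstring describes):

from pylab import show

query_prf = [('q1', 20, 16, 4, 0.80, 0.67), ('q2', 18, 12, 6, 0.75, 0.60)]
subject_prf = [('s1', 22, 14, 8, 0.64, 0.58)]
generate_pr_scatter_plots(query_prf, subject_prf)
show()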
Example #5
def geweke_plot(data, name, format='png', suffix='-diagnostic', path='./',
                fontmap=None, verbose=1):
    # Generate Geweke (1992) diagnostic plots

    if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}

    # Generate new scatter plot
    figure()
    x, y = transpose(data)
    scatter(x.tolist(), y.tolist())

    # Plot options
    xlabel('First iteration', fontsize='x-small')
    ylabel('Z-score for %s' % name, fontsize='x-small')

    # Plot lines at +/- 2 sd from zero (pyplot/nmin/nmax are pylab's plot and
    # numpy's min/max, as aliased in pymc's Matplot module)
    pyplot((nmin(x), nmax(x)), (2, 2), '--')
    pyplot((nmin(x), nmax(x)), (-2, -2), '--')

    # Set plot bound
    ylim(min(-2.5, nmin(y)), max(2.5, nmax(y)))
    xlim(0, nmax(x))

    # Save to file
    if not os.path.exists(path):
        os.mkdir(path)
    if not path.endswith('/'):
        path += '/'
    savefig("%s%s%s.%s" % (path, name, suffix, format))
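A usage sketch with made-up diagnostics: pairs of (first iteration, z-score), which is the shape `transpose(data)` above expects:

import numpy as np

fake_scores = list(zip(np.arange(0, 5000, 500), np.random.randn(10)))
geweke_plot(fake_scores, 'alpha', path='./diagnostics')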
Example #6
def drunkTest(numTrials = 1000):
    #stepsTaken = [10, 100, 1000, 10000]
    stepsTaken = 1000
    
    for dClass in (UsualDrunk, ColdDrunk, EDrunk, PhotoDrunk, DDrunk):
        #initialize field
        field = Field()
        origin = Location(0, 0)
        
        # initialize drunk 
        drunk = dClass('Drunk')
        field.addDrunk(drunk, origin)

        x_pos, y_pos = [], [] # initialize to empty
        x, y = 0.0, 0.0
        
        for trial in range(numTrials): # trials 
            x, y = walkVector(field, drunk, stepsTaken)
            x_pos.append(x)
            y_pos.append(y)
            
        #pylab.plot(x_pos, y_pos, 'ro', s=5,
        #           label = dClass.__name__)
        pylab.scatter(x_pos, y_pos,s=5, color='red')
        pylab.title(str(dClass))
        pylab.xlabel('x')
        pylab.grid()
        pylab.xlim(-100, 100)
        pylab.ylim(-100,100)
        pylab.ylabel('y')
        pylab.show()
Example #7
def plotslice(pos,filename='',boxsize=100.):
    ng = pos.shape[0]
    M.clf()
    M.scatter(pos[ng // 4, :, :, 1].flatten(), pos[ng // 4, :, :, 2].flatten(), s=1., lw=0.)
    M.axis('tight')
    if filename != '':
        M.savefig(filename)
Example #8
    def plot_margin(X1_train, X2_train, clf):
        def f(x, w, b, c=0):
            # given x, return y such that [x, y] is on the line
            # w.x + b = c
            return (-w[0] * x - b + c) / w[1]

        pl.plot(X1_train[:,0], X1_train[:,1], "ro")
        pl.plot(X2_train[:,0], X2_train[:,1], "bo")
        pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c="g")

        # w.x + b = 0
        a0 = -4; a1 = f(a0, clf.w, clf.b)
        b0 = 4; b1 = f(b0, clf.w, clf.b)
        pl.plot([a0,b0], [a1,b1], "k")

        # w.x + b = 1
        a0 = -4; a1 = f(a0, clf.w, clf.b, 1)
        b0 = 4; b1 = f(b0, clf.w, clf.b, 1)
        pl.plot([a0,b0], [a1,b1], "k--")

        # w.x + b = -1
        a0 = -4; a1 = f(a0, clf.w, clf.b, -1)
        b0 = 4; b1 = f(b0, clf.w, clf.b, -1)
        pl.plot([a0,b0], [a1,b1], "k--")

        pl.axis("tight")
        pl.show()
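The helper f solves w[0]*x + w[1]*y + b = c for y, so the three plot calls trace the decision line (c = 0) and the two margins (c = 1 and c = -1). A hypothetical usage sketch, assuming plot_margin is reachable at this scope; `clf` is assumed to carry .w, .b and support vectors .sv, exactly as the code above reads them:

import numpy as np
import pylab as pl

class FittedSVM:  # stand-in with the attributes plot_margin expects
    w = np.array([1.0, 1.0])
    b = 0.0
    sv = np.array([[1.0, 1.0], [-1.0, -1.0]])

X1 = np.random.randn(20, 2) + 2  # class 1 points
X2 = np.random.randn(20, 2) - 2  # class 2 points
plot_margin(X1, X2, FittedSVM())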
Example #9
def pseudoSystem():

	#The correlations discovered when answering this question show the emergent effects of component evolution on CSE.
	#We can further study these correlations by creating an example component system consisting of many components, where a different component has a new version released every day.
	#By looking at users who upgrade the system at different frequencies over 100 days, we present two graphs: upgrade frequency vs. up-to-date-ness (uttd) and vs. change.
	
	
	l = 100
	uttdxy = []
	chxy = []
	for uf in range(1,20):
		uttd = list(range(uf)) * (l * 2 // uf)
		uttd = uttd[1:l+1]
		uttdxy.append((uf,numpy.mean(uttd)))
		
		sh = [0]*(uf-1) + [uf]
		sh = sh*l
		sh = sh[:l]
		chxy.append((uf,sum(sh)))
		
	pylab.figure(20)
	x,y = zip(*sorted(uttdxy))
	pylab.plot(x,y)
	pylab.scatter(x,y)
	saveFigure("q1bpseudouttd")
	
	pylab.figure(21)
	
	x,y = zip(*sorted(chxy))
	pylab.plot(x,numpy.array(y))
	pylab.scatter(x,numpy.array(y))
	pylab.ylim([0,l+10])
	saveFigure("q1bpseudochange")
Example #10
def plot_iris_knn():
    iris = datasets.load_iris()
    X = iris.data[:, :2]  # we only take the first two features. We could
                        # avoid this ugly slicing by using a two-dim dataset
    y = iris.target

    knn = neighbors.KNeighborsClassifier(n_neighbors=3)
    knn.fit(X, y)

    x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
    y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    pl.figure()
    pl.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    pl.xlabel('sepal length (cm)')
    pl.ylabel('sepal width (cm)')
    pl.axis('tight')
Example #11
def plot_polynomial_regression():
    rng = np.random.RandomState(0)
    x = 2*rng.rand(100) - 1
    f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t**5 - .5 * t**9
    y = f(x) + .4 * rng.normal(size=100)

    x_test = np.linspace(-1, 1, 100)

    pl.figure()
    pl.scatter(x, y, s=4)

    X = np.array([x**i for i in range(5)]).T
    X_test = np.array([x_test**i for i in range(5)]).T
    regr = linear_model.LinearRegression()
    regr.fit(X, y)
    pl.plot(x_test, regr.predict(X_test), label='4th order')

    X = np.array([x**i for i in range(10)]).T
    X_test = np.array([x_test**i for i in range(10)]).T
    regr = linear_model.LinearRegression()
    regr.fit(X, y)
    pl.plot(x_test, regr.predict(X_test), label='9th order')

    pl.legend(loc='best')
    pl.axis('tight')
    pl.title('Fitting a 4th and a 9th order polynomial')

    pl.figure()
    pl.scatter(x, y, s=4)
    pl.plot(x_test, f(x_test), label="truth")
    pl.axis('tight')
    pl.title('Ground truth (9th order polynomial)')
Example #12
def plot_scatter(results, xvar='lat', yvar='max_CAPE'):
    plt.xlabel(xvar)
    plt.ylabel(yvar)

    for res in results:
        if res['max_CAPE'] != 0:
            plt.scatter(res[xvar], res[yvar])
Example #13
def plot_values(X, Y, xlabel, ylabel, suffix, ptype='plot'):
    output_filename = constants.ATTRACTIVENESS_FOLDER_NAME + constants.DATASET + '_' + suffix

    X1 = [X[i] for i in range(len(X)) if X[i]>0 and Y[i]>0]
    Y1 = [Y[i] for i in range(len(X)) if X[i]>0 and Y[i]>0]
    X = X1
    Y = Y1
    
    pylab.close("all")
    
    pylab.figure(figsize=(8, 7))

    #pylab.rcParams.update({'font.size': 20})

    pylab.scatter(X, Y)
    
    #pylab.axis(vis.get_bounds(X, Y, False, False))

    #pylab.xscale('log')
    pylab.yscale('log')

    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)   
    #pylab.xlim(0.1,1)
    #pylab.ylim(ymin=0.01)
    #pylab.tight_layout()

    pylab.savefig(output_filename + '.pdf')
Example #14
def pair_plot(data, savefile=None, display=True, **kwargs):
    chan = data.channels
    l = len(chan)
    figure = pylab.figure()
    pylab.subplot(l, l, 1)
    for i in range(l):
        for j in range(i + 1):
            pylab.subplot(l, l, i * l + j + 1)
            if i == j:
                pylab.hist(data[:, i], bins=200, histtype='stepfilled')
            else:
                pylab.scatter(data[:, i], data[:, j], **kwargs)

            if j == 0:
                pylab.ylabel(chan[i])
            if i == l - 1:
                pylab.xlabel(chan[j])

    if display:
        pylab.show()

    if savefile:
        pylab.savefig(savefile)

    return figure
Example #15
def test():
    from pandas import DataFrame
    X = np.linspace(0.01, 1.0, 10)
    Y = np.log(X)
    Y -= Y.min()
    Y /= Y.max()
    Y *= 0.95

    #Y = X

    df = DataFrame({'X': X, 'Y': Y})
    P = Pareto(df, 'X', 'Y')

    data = []
    for val in np.linspace(0,1,15):
        data.append(dict(val=val, x=P.lookup_x(val), y=P.lookup_y(val)))
        pl.axvline(val, alpha=.5)
        pl.axhline(val, alpha=.5)
    dd = DataFrame(data)
    pl.scatter(dd.y, dd.val, lw=0, c='r')
    pl.scatter(dd.val, dd.x, lw=0, c='g')
    print(dd)

    #P.scatter(c='r', lw=0)
    P.show_frontier(c='r', lw=4)
    pl.show()
Example #16
def generateNetwork(aINDEX, dDIFF, AM, name):
    R = 100
    X = []
    Y = []
    Z = []

    for i in range(len(aINDEX)):
        #r = 100*R + sum(AM[i])*R
        r = 1000*R + 10000*abs(dDIFF[aINDEX[i]])*R
        t = 2*math.pi*random.random()
        X.append(r*math.cos(t))
        Y.append(r*math.sin(t))
        Z.append(sum(AM[i]))

    fig = P.figure()

    P.axis('off')
    P.scatter(X, Y, Z, edgecolor = '', c = 'lightblue', alpha = 0.5)

    for i in range(len(aINDEX)):
        for j in range(i):
            if sum(AM[i]) > 200 and sum(AM[j]) > 200:
                P.plot([X[i], X[j]], [Y[i], Y[j]],'k',lw = 0.01)

    for i in range(len(aINDEX)):
        if sum(AM[i]) > 200:
            P.text(X[i], Y[i], aINDEX[i], fontsize=8)

    #P.show()
    fig.savefig('figures/' + name + '.png')
    return
Example #17
def plot(n):
    h = heighway(n)
    x, y = [], []
    for z in h:
        x.append(z.real)
        y.append(z.imag)
    scatter(x, y)
    show()
Example #18
def plot_values(values, suffix):
    
    '''
    #bins = [math.pow(BASE, i) for i in range(int(math.log(max(degrees), BASE)))]
    bins = [i for i in range(max(degrees)+1) if i in degrees]
    print bins

    N, tempbins, temppatches = pylab.hist(degrees, bins)
    H = [[bins[i], float(N[i])/sum(N)] for i in range(len(N)) if N[i]>0]
    print N
    pylab.close()

    X = [h[0] for h in H if h[0]!=0]
    Y = [h[1] for h in H if h[0]!=0]
    '''

    X = range(len(values))
    Y = values
    
    output_filename = constants.CHARTS_FOLDER_NAME + 'time_ordered' + '_' + suffix

    pylab.figure(figsize=(9, 4))
    pylab.rcParams.update({'font.size': 20})
    #pylab.xscale('log')
    #pylab.yscale('log')
    pylab.scatter(X, Y)
    #pylab.xlabel('# of edges')
    #pylab.ylabel('Probability')
    #pylab.title(output_filename)
    pylab.savefig(output_filename + '.pdf')
    pylab.close()
Example #19
def plotTOPICS(name, sdDIFF, dKT):

    vCOMM =[]
    vDIFF = []

    for i in range(len(sdDIFF)):
        #print sdDIFF[i][0], -sdDIFF[i][1], dKT[sdDIFF[i][0]]                                                                                                                                                
        vDIFF.append(-sdDIFF[i][1])
        vCOMM.append(dKT[sdDIFF[i][0]])

    nDIFF = np.array(vDIFF)
    nCOMM = np.array(vCOMM)

    mDIFF = np.mean(nDIFF)
    sDIFF = np.std(nDIFF)

    mCOMM = np.mean(nCOMM)
    sCOMM = np.std(nCOMM)


    X = []
    Y = []
    for j in range(len(sdDIFF)):
        x = (max(nCOMM) - nCOMM[j])/(max(nCOMM) - min(nCOMM))
        y = (nDIFF[j] - min(nDIFF))/(max(nDIFF) - min(nDIFF))

        X.append(x)

        Y.append(y)

    fig = P.figure()
    P.scatter(X, Y)
    fig.savefig('figures/DISTR_' + name + '.png') 

    return X, Y
Example #20
def main():
    args = sys.argv[1:]
    
    dataset_path = None
    if args and '-save' in args:
        try:
            dataset_path = args[args.index('-save') + 1]
        except IndexError:
            dataset_path = 'dataset.p'
        
    # Generate the dataset
    print "...Generating Dataset..."
    X1, Y1 = make_circles(n_samples=800, noise=0.07, factor=0.4)
    frac0 = len(np.where(Y1 == 0)[0]) / float(len(Y1))
    frac1 = len(np.where(Y1 == 1)[0]) / float(len(Y1))
    
    print "Percentage of '0' labels:", frac0
    print "Percentage of '1' labels:", frac1

    # (Optionally) save the dataset to DATASET_PATH
    if dataset_path:
        print "...Saving dataset to {0}...".format(dataset_path)
        pickle.dump((X1, Y1, frac0, frac1), open(dataset_path, 'wb'))

    # Plot the dataset
    print "...Showing dataset in new window..."
    pl.figure(figsize=(10, 8))
    pl.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)

    pl.subplot(111)
    pl.title("Our Dataset: N=200, '0': {0} '1': {1} ".format(frac0, frac1), fontsize="large")

    pl.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

    pl.show()
    
    print "...Done."
Example #21
def plot_vel_vs_h3(maps, out_suffix=''):
    xaxis = (np.arange(len(maps.velocity[0])) - ppxf_m31.bhpos_pix[0]) * 0.05
    yaxis = (np.arange(len(maps.velocity)) - ppxf_m31.bhpos_pix[1]) * 0.05
    yy, xx = np.meshgrid(xaxis, yaxis)
    radius = np.hypot(xx, yy)
    good = np.where((np.abs(yy) < 0.5) & (np.abs(xx) < 1.0))
        
    plt.scatter(maps.velocity[good], maps.h3[good], c=maps.sigma[good], s=5,
                    marker='o', vmin=0, vmax=450)
    plt.xlim(-700, 0)
    plt.ylim(-0.5, 0.5)
    plt.colorbar(label='Sigma (km/s)')
    plt.axhline(linestyle='--', color='grey')
    plt.xlabel('Velocity (km/s)')
    plt.ylabel('h3')
    plt.savefig(plot_dir + 'vel_vs_h3' + out_suffix + '.png')

    plt.clf()
    plt.scatter(maps.sigma[good], maps.h3[good], c=maps.velocity[good], s=5,
                    marker='o', vmin=-700, vmax=0)
    plt.xlim(0, 450)
    plt.ylim(-0.5, 0.5)
    plt.colorbar(label='Velocity (km/s)')
    plt.axhline(linestyle='--', color='grey')
    plt.xlabel('Sigma (km/s)')
    plt.ylabel('h3')
    plt.savefig(plot_dir + 'sig_vs_h3' + out_suffix + '.png')

    return
Example #22
def test_regress_vary_na(baselines, coeffs, nants=32, restrictChi=False):
    """
    This function runs many tests of the linear regression, varying the number 
    of antennae in the array. Again, the global signal is hard-coded to be 1.
    """
    for jj in n.arange(100):
        nas = n.arange(2, nants)
        gs_diff = n.zeros(len(nas))
        for ii, na in enumerate(nas):
            gs_recov, redchi, err = test_regress(baselines, coeffs, gs=1, n_sig=0.1, na=na, readFromFile=True)
            if restrictChi:
                if n.absolute(redchi - 1) < 0.1:
                    gs_diff[ii] = gs_recov - 1.0
                else:
                    gs_diff[ii] = n.nan  # NaN, not None, for a float array
            else:
                gs_diff[ii] = gs_recov - 1.0
        p.scatter(nas, gs_diff)
    p.xlabel("Number of antenna")
    p.ylabel("Difference between true and recovered global signal")
    # p.show()
    if restrictChi:
        p.savefig("./figures/gs_diff_vs_na_good_chi.pdf")
    else:
        p.savefig("./figures/gs_diff_vs_na.pdf")
    p.clf()
Example #23
def plot_contour(X, X1, X2, clf, title):
    pl.figure()
    pl.title(title)

    # Plot instances of class 1.
    pl.plot(X1[:, 0], X1[:, 1], "ro")
    # Plot instances of class 2.
    pl.plot(X2[:, 0], X2[:, 1], "bo")

    # Select "support vectors".
    if hasattr(clf, "support_vectors_"):
        sv = clf.support_vectors_
    else:
        sv = X[clf.coef_.ravel() != 0]

    # Plot support vectors.
    pl.scatter(sv[:, 0], sv[:, 1], s=100, c="g")

    # Plot decision surface.
    A, B = np.meshgrid(np.linspace(-6, 6, 50), np.linspace(-6, 6, 50))
    C = np.array([[x1, x2] for x1, x2 in zip(np.ravel(A), np.ravel(B))])
    Z = clf.decision_function(C).reshape(A.shape)
    pl.contour(A, B, Z, [0.0], colors="k", linewidths=1, origin="lower")

    pl.axis("tight")
Example #24
    def plot(self):
        f = pylab.figure(figsize=(8,4))
        co = [] #colors container
        for zScore, r in zip(self.zScores, self.log2Ratio):  # itertools.izip in Python 2
            if zScore < self.pCut:
                if r > 0:
                    co.append(Colors().greenColor)
                elif r < 0:
                    co.append(Colors().redColor)
                else:
                    raise Exception
            else:
                co.append(Colors().blueColor)

        #print "Probability this is from a normal distribution: %.3e" %stats.normaltest(self.log2Ratio)[1]
        ax = f.add_subplot(121)
        pylab.axvline(self.meanLog2Ratio, color=Colors().redColor)
        pylab.axvspan(self.meanLog2Ratio-(2*self.stdLog2Ratio), 
                      self.meanLog2Ratio+(2*self.stdLog2Ratio), color=Colors().blueColor, alpha=0.2)
        his = pylab.hist(self.log2Ratio, bins=50, color=Colors().blueColor)
        pylab.xlabel("log2 Ratio %s/%s" %(self.sampleNames[1], self.sampleNames[0]))
        pylab.ylabel("Frequency")
        
        ax = f.add_subplot(122, aspect='equal')
        pylab.scatter(self.genes1, self.genes2, c=co, alpha=0.5)        
        pylab.ylabel("%s RPKM" %self.sampleNames[1])
        pylab.xlabel("%s RPKM" %self.sampleNames[0])
        pylab.yscale('log')
        pylab.xscale('log')
        pylab.tight_layout()
Example #25
    def genderBoxplots(self, women, men, labels, path):
        data = [women.edition_count.values, men.edition_count.values]

        plt.figure()
        plt.boxplot(data)

        # mark the mean
        means = [np.mean(x) for x in data]
        print(means)

        plt.scatter(range(1, len(data) + 1), means, color="red", marker=">", s=20)
        plt.ylabel("num editions")
        plt.xticks(range(1, len(data) + 1), labels)
        plt.savefig(
            path + "/numeditions_gender_box_withOutlier" + self.pre + "-" + self.post + ".png", bbox_inches="tight"
        )

        plt.figure()
        plt.boxplot(data, sym="")
        # mark the mean
        means = [np.mean(x) for x in data]
        print(means)

        plt.scatter(range(1, len(data) + 1), means, color="red", marker=">", s=20)
        plt.ylabel("num editions")
        plt.xticks(range(1, len(data) + 1), labels)
        plt.savefig(path + "/numeditions_gender_box" + self.pre + "-" + self.post + ".png", bbox_inches="tight")
Example #26
def plot_all(x, y):
	pca = PCA(n_components=2)
	new_x = pca.fit(x).transform(x)
	for i in range(0, len(new_x)):
		l_color = color_list[y[i]]
		pl.scatter(new_x[i, 0], new_x[i, 1], color=l_color)
	pl.show()
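A usage sketch; `color_list` and PCA are not defined in this snippet, so both are assumed here (PCA from sklearn.decomposition, color_list mapping integer labels to matplotlib colors):

from sklearn.datasets import load_iris

color_list = {0: 'r', 1: 'g', 2: 'b'}  # assumed label -> color mapping
iris = load_iris()
plot_all(iris.data, iris.target)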
Example #27
def linearReg():
    from sklearn import datasets
    diabetes = datasets.load_diabetes()
    diabetes_X_train = diabetes.data[:-20]
    diabetes_X_test = diabetes.data[-20:]
    diabetes_y_train = diabetes.target[:-20]
    diabetes_y_test = diabetes.target[-20:]
    from sklearn import linear_model
    regr = linear_model.LinearRegression()
    regr.fit(diabetes_X_train, diabetes_y_train)
    print(regr.coef_)
    import numpy as np
    print(np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
    print(regr.score(diabetes_X_test, diabetes_y_test))

    X = np.c_[.5, 1].T
    y = [.5, 1]
    test = np.c_[0, 2].T
    regr = linear_model.LinearRegression()

    import pylab as pl
    pl.figure()
    np.random.seed(0)
    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X
        regr.fit(this_X, y)
        pl.plot(test, regr.predict(test))
        pl.scatter(this_X, y, s=3)
Example #28
 def scatter_from_csv(self, filename, sand='sand', silt='silt', clay='clay', diameter='', hue='', tags='', **kwargs):
     """Loads data from filename (expects csv format). Needs one header row with at least the columns {sand, silt, clay}. Can also plot two more variables for each point; specify the header value for columns to be plotted as diameter, hue. Can also add a text tag offset from each point; specify the header value for those tags.
     Note! text values (header entries, tag values ) need to be quoted to be recognized as text. """
     fh = open(filename, 'r')  # file() and 'U' mode are Python 2; universal newlines are the default now
     soilrec = csv2rec(fh)
     count = 0
     if (sand in soilrec.dtype.names):
         count = count + 1
     if (silt in soilrec.dtype.names):
         count = count + 1
     if (clay in soilrec.dtype.names):
         count = count + 1
     if (count < 3):
         print("ERROR: need columns for sand, silt and clay identified in", filename)
     locargs = {'s': None, 'c': None}
     for (col, key) in ((diameter, 's'), (hue, 'c')):
         col = col.lower()
         if (col != '') and (col in soilrec.dtype.names):
             locargs[key] = soilrec.field(col)
         else:
             print('ERROR: did not find', col, 'in', filename)
     for k in kwargs:
         locargs[k] = kwargs[k]
     values = list(zip(soilrec.field(sand), soilrec.field(clay), soilrec.field(silt)))
     print(values)
     (xs, ys) = self._toCart(values)
     p.scatter(xs, ys, label='_', **locargs)
     if (tags != ''):
         tags = tags.lower()
         for (x, y, tag) in zip(xs, ys, soilrec.field(tags)):
             print(x, y, tag)
             p.text(x + 1, y + 1, tag, fontsize=12)
     fh.close()
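A hypothetical input and call (the host ternary-plot class that provides _toCart is not shown in this snippet):

# soils.csv -- header values quoted, as the docstring requires:
#   "sand","silt","clay","om"
#   40,40,20,1.5
#   10,70,20,2.1
#
# plot = TernaryScatter()  # hypothetical host class with a _toCart method
# plot.scatter_from_csv('soils.csv', hue='om', tags='om')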
Example #29
 def draw_cluster(self):
     for station in self.cluster.stations:
         for detector in station.detectors:
             x, y = detector.get_xy_coordinates()
             plt.scatter(x, y, c='r', s=5, edgecolor='none')
         x, y, alpha = station.get_xyalpha_coordinates()
         plt.scatter(x, y, c='orange', s=10, edgecolor='none')
Example #30
def test_regress_vary_bsln(baselines, coeffs, nants=32, restrictChi=False):
    """
    This is the exact same function as test_regress_vary_na except that it 
    plots the number of baselines on the x axis instead of the number of 
    antennae.
    """
    for jj in n.arange(100):
        nas = n.arange(2, nants)
        gs_diff = n.zeros(len(nas))
        for ii, na in enumerate(nas):
            gs_recov, redchi, err = test_regress(baselines, coeffs, gs=1, n_sig=0.1, na=na, readFromFile=True)
            if restrictChi:
                if n.absolute(redchi - 1) < 0.1:
                    gs_diff[ii] = gs_recov - 1.0
                else:
                    gs_diff[ii] = n.nan  # NaN, not None, for a float array
            else:
                gs_diff[ii] = gs_recov - 1.0
        p.scatter(nas * (nas - 1) / 2, gs_diff)
    p.xlabel("Number of baselines")
    p.ylabel("Difference between true and recovered global signal")
    # p.show()
    if restrictChi:
        p.savefig("./figures/gs_diff_vs_bsln_good_chi.pdf")
    else:
        p.savefig("./figures/gs_diff_vs_bsln.pdf")
    p.clf()
Example #31
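This example begins mid-script; `classifiers`, `X`, `Y` and `h` are undefined here. A hedged setup, modeled on scikit-learn's iris classifier-comparison demo rather than the original author's preamble:

import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier

iris = datasets.load_iris()
X = iris.data[:, :2]  # first two features only
Y = iris.target
h = 0.02  # mesh step size
classifiers = {'knn': KNeighborsClassifier(3), 'logistic': LogisticRegression()}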
fignum = 1
# we create an instance of Neighbours Classifier and fit the data.
for name, clf in classifiers.items():  # iteritems() in Python 2
    clf.fit(X, Y)

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:,0].min() - .5, X[:,0].max() + .5
    y_min, y_max = X[:,1].min() - .5, X[:,1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    pl.figure(fignum, figsize=(4, 3))
    pl.set_cmap(pl.cm.Paired)
    pl.pcolormesh(xx, yy, Z)

    # Plot also the training points
    pl.scatter(X[:,0], X[:,1], c=Y)
    pl.xlabel('Sepal length')
    pl.ylabel('Sepal width')

    pl.xlim(xx.min(), xx.max())
    pl.ylim(yy.min(), yy.max())
    pl.xticks(())
    pl.yticks(())
    fignum += 1

pl.show()
Example #32
    def plot(self,
             to_plot=None,
             do_save=None,
             fig_path=None,
             fig_args=None,
             plot_args=None,
             scatter_args=None,
             axis_args=None,
             as_dates=True,
             interval=None,
             dateformat=None,
             font_size=18,
             font_family=None,
             use_grid=True,
             use_commaticks=True,
             do_show=True,
             verbose=None):
        '''
        Plot the results -- can supply arguments for both the figure and the plots.

        Args:
            to_plot (dict): Nested dict of results to plot; see default_sim_plots for structure
            do_save (bool or str): Whether or not to save the figure. If a string, save to that filename.
            fig_path (str): Path to save the figure
            fig_args (dict): Dictionary of kwargs to be passed to pl.figure()
            plot_args (dict): Dictionary of kwargs to be passed to pl.plot()
            scatter_args (dict): Dictionary of kwargs to be passed to pl.scatter()
            axis_args (dict): Dictionary of kwargs to be passed to pl.subplots_adjust()
            as_dates (bool): Whether to plot the x-axis as dates or time points
            interval (int): Interval between tick marks
            dateformat (str): Date string format, e.g. '%B %d'
            font_size (int): Size of the font
            font_family (str): Font face
            use_grid (bool): Whether or not to plot gridlines
            use_commaticks (bool): Plot y-axis with commas rather than scientific notation
            do_show (bool): Whether or not to show the figure
            verbose (bool): Display a bit of extra information

        Returns:
            fig: Figure handle
        '''

        if verbose is None:
            verbose = self['verbose']
        sc.printv('Plotting...', 1, verbose)

        if to_plot is None:
            to_plot = default_sim_plots
        to_plot = sc.odict(to_plot)  # In case it's supplied as a dict

        # Handle input arguments -- merge user input with defaults
        fig_args = sc.mergedicts({'figsize': (16, 14)}, fig_args)
        plot_args = sc.mergedicts({'lw': 3, 'alpha': 0.7}, plot_args)
        scatter_args = sc.mergedicts({'s': 70, 'marker': 's'}, scatter_args)
        axis_args = sc.mergedicts(
            {
                'left': 0.1,
                'bottom': 0.05,
                'right': 0.9,
                'top': 0.97,
                'wspace': 0.2,
                'hspace': 0.25
            }, axis_args)

        fig = pl.figure(**fig_args)
        pl.subplots_adjust(**axis_args)
        pl.rcParams['font.size'] = font_size
        if font_family:
            pl.rcParams['font.family'] = font_family

        res = self.results  # Shorten since heavily used

        # Plot everything
        for p, title, keylabels in to_plot.enumitems():
            ax = pl.subplot(len(to_plot), 1, p + 1)
            for key in keylabels:
                label = res[key].name
                this_color = res[key].color
                y = res[key].values
                pl.plot(res['t'], y, label=label, **plot_args, c=this_color)
                if self.data is not None and key in self.data:
                    pl.scatter(self.data['day'],
                               self.data[key],
                               c=[this_color],
                               **scatter_args)
            if self.data is not None and len(self.data):
                pl.scatter(pl.nan,
                           pl.nan,
                           c=[(0, 0, 0)],
                           label='Data',
                           **scatter_args)

            pl.grid(use_grid)
            cvu.fixaxis(self)
            if use_commaticks:
                sc.commaticks()
            pl.title(title)

            # Optionally reset tick marks (useful for e.g. plotting weeks/months)
            if interval:
                xmin, xmax = ax.get_xlim()
                ax.set_xticks(pl.arange(xmin, xmax + 1, interval))

            # Set xticks as dates
            if as_dates:
                xticks = ax.get_xticks()
                xticklabels = self.inds2dates(xticks, dateformat=dateformat)
                ax.set_xticklabels(xticklabels)

            # Plot interventions
            for intervention in self['interventions']:
                intervention.plot(self, ax)

        # Ensure the figure actually renders or saves
        if do_save:
            if fig_path is None:  # No figpath provided - see whether do_save is a figpath
                if isinstance(do_save, str):
                    fig_path = do_save  # It's a string, assume it's a filename
                else:
                    fig_path = 'covasim.png'  # Just give it a default name
            fig_path = sc.makefilepath(
                fig_path)  # Ensure it's valid, including creating the folder
            pl.savefig(fig_path)

        if do_show:
            pl.show()
        else:
            pl.close(fig)

        return fig
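A usage sketch, assuming this method lives on a covasim-style Sim object as the names (default_sim_plots, cvu.fixaxis) suggest; the calls below are illustrative, not covasim's documented API:

# sim = Sim(pars={'n': 1000})  # hypothetical simulation setup
# sim.run()
# fig = sim.plot(do_save='results.png', interval=7, as_dates=False)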
Example #33
    point_on_plane = np.eye(n_obj)[0]  # Point on Das-Dennis
    reference_directions = get_ref_dirs_from_n(n_obj, 21)  # Das-Dennis points

    for point in ref_point:
        # ref_proj = point - np.dot(point - point_on_plane, n_vector) * n_vector
        # TODO: Compute which is faster, a copy.deepcopy, or recomputing all the points from get_ref_dirs_from_n
        ref_dir = copy.deepcopy(
            reference_directions)  # Copy of computed reference directions
        for i in range(n_obj):  # Shrink Das-Dennis points by a factor of alpha
            ref_dir[:, i] = point[i] + alpha * (ref_dir[:, i] - point[i])
        for d in ref_dir:  # Project shrunken Das-Dennis points back onto the original Das-Dennis hyperplane
            ref_dirs.append(d -
                            np.dot(d - point_on_plane, n_vector) * n_vector)
    # TODO: Extreme points are only extreme if the scale is normalized between 0-1; how to make them truly extreme?
    ref_dirs.extend(np.eye(n_obj))  # Add extreme points
    return np.array(ref_dirs)


if __name__ == '__main__':

    test = get_ref_dirs_from_n(2, 100)

    # for i in [3]:
    #     for j in range(20):
    #         test = get_ref_dirs_from_section(i, j)
    #         print(j, len(test), get_number_of_reference_directions(i, j))
    # print()
    import pylab as pl
    fig = pl.subplot()
    pl.scatter(test[:, 0], test[:, 1])
Example #34
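This fragment also starts mid-script; a hedged setup for the undefined names (`X`, `Y`, `clf`, `w`), modeled on scikit-learn's separating-hyperplane example:

import numpy as np
import pylab as pl
from sklearn import svm

np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
w = clf.coef_[0]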
a=-w[0]/w[1]
xx=np.linspace(-5,5)
yy=a*xx-(clf.intercept_[0])/w[1]

b=clf.support_vectors_[0]
yy_down=a*xx+(b[1]-a*b[0])
b=clf.support_vectors_[-1]
yy_up=a*xx+(b[1]-a*b[0])

print "w:",w
print "a:",a

print "support_vectors_",clf.support_vectors_
print "clf.coef_",clf.coef_

pl.plot(xx,yy,'k-')
pl.plot(xx,yy_down,'k--')
pl.plot(xx,yy_up,'k--')
pl.scatter(clf.support_vectors_[:,0],clf.support_vectors_[:,-1],s=80,facecolors='none')
pl.scatter(X[:,0],X[:,-1],c=Y,cmap=pl.cm.Paired)    # scatter draws the individual data points
pl.axis('tight')
pl.show()
Example #35
def discrepancy_plot(data,
                     name='discrepancy',
                     report_p=True,
                     format='png',
                     suffix='-gof',
                     path='./',
                     fontmap=None):
    '''
    Generate goodness-of-fit deviate scatter plot.
    
    :Arguments:
        data: list
            List (or list of lists for vector-valued variables) of discrepancy values, output
            from the `pymc.diagnostics.discrepancy` function .

        name: string
            The name of the plot.
            
        report_p: bool
            Flag for annotating the p-value to the plot.

        format (optional): string
            Graphic output format (defaults to png).

        suffix (optional): string
            Filename suffix (defaults to "-gof").

        path (optional): string
            Specifies location for saving plots (defaults to local directory).

        fontmap (optional): dict
            Font map for plot.
    
    '''

    if verbose > 0:  # `verbose` is assumed to be module-level, as in pymc.Matplot
        print_('Plotting', name + suffix)

    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}

    # Generate new scatter plot
    figure()
    try:
        x, y = transpose(data)
    except ValueError:
        x, y = data
    scatter(x, y)

    # Plot x=y line
    lo = nmin(ravel(data))
    hi = nmax(ravel(data))
    datarange = hi - lo
    lo -= 0.1 * datarange
    hi += 0.1 * datarange
    pyplot((lo, hi), (lo, hi))

    # Plot options
    xlabel('Observed deviates', fontsize='x-small')
    ylabel('Simulated deviates', fontsize='x-small')

    if report_p:
        # Put p-value in legend
        count = sum(s > o for o, s in zip(x, y))
        text(lo + 0.1 * datarange,
             hi - 0.1 * datarange,
             'p=%.3f' % (count / len(x)),
             horizontalalignment='center',
             fontsize=10)

    # Save to file
    if not os.path.exists(path):
        os.mkdir(path)
    if not path.endswith('/'):
        path += '/'
    savefig("%s%s%s.%s" % (path, name, suffix, format))
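A minimal usage sketch with made-up (observed, simulated) deviate pairs, the format produced by pymc's diagnostics.discrepancy; assumes pymc.Matplot's module-level helpers (figure, scatter, transpose, nmin/nmax, verbose) are in scope:

import numpy as np

deviates = list(zip(np.random.randn(100), np.random.randn(100)))
discrepancy_plot(deviates, name='my-model', path='./gof')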
Example #36
    def runMetroSingleChain(self, individual0, NSteps=1000, chain_dict={}):

        df = self.PM.NPixListData
        self.rv = chi2(df)
        _, Chi2 = self.GiveFitness(individual0)
        self.MinChi2 = Chi2
        logProb = self.rv.logpdf(Chi2)

        x = np.linspace(0, 2 * self.rv.moment(1), 1000)
        lP = self.rv.logpdf(x)
        iMax = np.argmax(lP)
        self.Chi2PMax = x[iMax]

        # #####################
        # # V0
        #self.Var=self.MinChi2/self.Chi2PMax
        #Chi20_n=self.MinChi2/self.Var
        #VarMin=(3e-3)**2
        #ThVar=np.max([self.Var,VarMin])
        #ShrinkFactor=np.min([1.,self.Var/ThVar])
        # # print
        # # print ShrinkFactor
        # # print
        # # stop
        # #####################
        VarMin = (3e-4)**2
        #self.Var=np.max([self.EstimatedStdFromResid**2,VarMin])
        Var = self.MinChi2 / self.Chi2PMax
        S = self.PM.ArrayToSubArray(individual0, Type="S")
        B = np.sum(np.abs(S)) / float(S.size)
        B0 = 7e-4
        Sig0 = 3e-3
        Sig = B * Sig0 / B0

        # print
        # print "%f %f %f -> %f"%(B,B0,Sig0,Sig)
        # print

        self.Var = np.max([4. * self.EstimatedStdFromResid**2, Sig**2])

        Chi20_n = self.MinChi2 / self.Var
        ShrinkFactor = 1.
        # #####################

        DicoChains = {}
        Parms = individual0

        # ##################################
        DoPlot = True
        if DoPlot:
            import pylab
            pylab.figure(1)
            x = np.linspace(0, 2 * self.rv.moment(1), 1000)
            P = self.rv.pdf(x)
            pylab.clf()
            pylab.plot(x, P)
            Chi2Red = Chi2  #/self.Var
            pylab.scatter(Chi2Red, np.mean(P), c="black")
            pylab.draw()
            pylab.show(block=False)
        # ##################################

        # ##################################
        DoPlot = False
        # DoPlot=True
        if DoPlot:
            import pylab
            x = np.linspace(0, 2 * self.rv.moment(1), 1000)
            P = self.rv.pdf(x)
            pylab.clf()
            pylab.plot(x, P)
            pylab.scatter(Chi20_n, np.mean(P), c="black")
            pylab.draw()
            pylab.show(block=False)
        # ##################################

        DicoChains["Parms"] = []
        DicoChains["Chi2"] = []
        DicoChains["logProb"] = []
        logProb0 = self.rv.logpdf(Chi20_n)

        Mut_pFlux, Mut_p0, Mut_pMove = 0.2, 0., 0.3

        #T.disable()
        FactorAccelerate = 1.
        lAccept = []
        NBurn = self.GD["MetroClean"]["MetroNBurnin"]

        NSteps = NSteps + NBurn

        NAccepted = 0
        iStep = 0
        NMax = NSteps  #10000

        #for iStep in range(NSteps):
        while NAccepted < NSteps and iStep < NMax:
            iStep += 1
            #print "========================"
            #print iStep
            individual1, = self.MutMachine.mutGaussian(individual0.copy(),
                                                       Mut_pFlux, Mut_p0,
                                                       Mut_pMove)  #,
            #FactorAccelerate=FactorAccelerate)
            # ds=Noise
            # individual1,=self.MutMachine.mutNormal(individual0.copy(),ds*1e-1*FactorAccelerate)
            # #T.timeit("mutate")

            _, Chi2 = self.GiveFitness(individual1)
            # if Chi2<self.MinChi2:
            #     self.Var=Chi2/self.Chi2PMax
            #     #print "           >>>>>>>>>>>>>> %f"%np.min(Chi2)

            Chi2_n = Chi2 / self.Var

            Chi2_n = Chi20_n + ShrinkFactor * (Chi2_n - Chi20_n)

            logProb = self.rv.logpdf(Chi2_n)

            p1 = logProb
            p0 = logProb0  #DicoChains["logProb"][-1]
            if p1 - p0 > 5:
                R = 1
            elif p1 - p0 < -5:
                R = 0
            else:
                R = np.min([1., np.exp(p1 - p0)])

            r = np.random.rand(1)[0]
            #print "%5.3f [%f -> %f]"%(R,p0,p1)
            # print "MaxDiff ",np.max(np.abs(self.pop[iChain]-DicoChains[iChain]["Parms"][-1]))
            lAccept.append((r < R))
            if r < R:  # accept
                individual0 = individual1
                logProb0 = logProb
                NAccepted += 1
                if NAccepted > NBurn:
                    DicoChains["logProb"].append(p1)
                    DicoChains["Parms"].append(individual1)
                    DicoChains["Chi2"].append(Chi2_n)

                if DoPlot:
                    pylab.scatter(Chi2_n, np.exp(p1), lw=0)
                    pylab.draw()
                    pylab.show(block=False)
                    pylab.pause(0.1)

                # print "  accept"
                # # Model=self.StackChain()

                # # Asq=self.ArrayMethodsMachine.PM.ModelToSquareArray(Model,TypeInOut=("Parms","Parms"))
                # # _,npol,NPix,_=Asq.shape
                # # A=np.mean(Asq,axis=0).reshape((NPix,NPix))
                # # Mask=(A==0)
                # # pylab.clf()
                # # pylab.imshow(A,interpolation="nearest")
                # # pylab.draw()
                # # pylab.show(False)
                # # pylab.pause(0.1)

            else:

                # # #######################
                if DoPlot:
                    pylab.scatter(Chi2_n, np.exp(p1), c="red", lw=0)
                    pylab.draw()
                    pylab.show(block=False)
                    pylab.pause(0.1)
                # # #######################
                pass

            #T.timeit("Compare")

            AccRate = np.count_nonzero(lAccept) / float(len(lAccept))
            #print "[%i] Acceptance rate %f [%f with ShrinkFactor %f]"%(iStep,AccRate,FactorAccelerate,ShrinkFactor)
            if (iStep % 50 == 0) & (iStep > 10):
                if AccRate > 0.234:
                    FactorAccelerate *= 1.5
                else:
                    FactorAccelerate /= 1.5
                FactorAccelerate = np.min([3., FactorAccelerate])
                FactorAccelerate = np.max([.01, FactorAccelerate])
                lAccept = []
            #T.timeit("Acceptance")

        T.timeit("Chain")

        chain_dict["logProb"] = np.array(DicoChains["logProb"])
        chain_dict["Parms"] = np.array(DicoChains["Parms"])
        chain_dict["Chi2"] = np.array(DicoChains["Chi2"])
Example #37
rateVariationCell = []
rateVariationD2d = []
rateVariationBoth = []
print("\n Variation of Throughput With No. of D2D Users\n")
for Nd in Ndvariation:
    sys.stdout.write("\r")
    progress = int(100 * ((Nd - Ndmin) / (Ndmax - Ndmin - step)))
    percent = "{:2}".format(progress)
    sys.stdout.write(" " + percent + " % ")
    [sys.stdout.write("##") for x in range(int(Nd / step))]
    sys.stdout.flush()
    throughputCell, throughPutD2d = core.core(Rc, Pc, bw, N0, tSNR, cellUsers,
                                            Nc, Nd, Nrb, d2dDistance,
                                            rbPerD2DPair, RWindowSize, 400, False)
    rateVariationCell.append(throughputCell)
    rateVariationD2d.append(throughPutD2d)
    rateVariationBoth.append(throughputCell + throughPutD2d)
print("")
pl.figure(2)
pl.plot(Ndvariation, np.asarray(rateVariationCell) / 1e6, label="Cell Users")
pl.scatter(Ndvariation, np.asarray(rateVariationCell) / 1e6, marker=">")
pl.plot(Ndvariation, np.asarray(rateVariationD2d) / 1e6, label="D2D Pairs")
pl.scatter(Ndvariation, np.asarray(rateVariationD2d) / 1e6, marker=">")
pl.plot(Ndvariation, np.asarray(rateVariationBoth) / 1e6, label="Both")
pl.scatter(Ndvariation, np.asarray(rateVariationBoth) / 1e6, marker=">")
pl.xlabel("No. of D2D Pairs")
pl.ylabel("Throughput (Mbits/sec)")
pl.legend(loc="upper left")
pl.grid(True)
pl.show()
Example #38
    def test_budget(self):
        "Test the budget heuristic."
        w = np.random.uniform(-1, 1, size=self.D)
        w_orig = w.copy()

        def L0(threshold):
            ww = w_orig.copy()
            self.prox(threshold, ww)
            return (np.abs(ww) > 0).sum()

        # Check that the find_threshold gives a conservative estimate for L0 budget
        M = len(w)
        f = {}
        est = {}
        for budget in range(M + 1):
            est[budget] = self.find_threshold(budget, w)
            l0 = L0(est[budget])
            f[budget] = l0
            assert l0 <= budget
        # Check end points
        assert f[0] == 0
        assert f[M] == M

        # Check coverage against a numerical sweep.
        numerical_x = np.linspace(0, M + 1, 10000)
        numerical_y = np.array([L0(threshold) for threshold in numerical_x])
        heuristic_x = np.array(sorted(est.values()))
        heuristic_y = np.array([L0(threshold) for threshold in heuristic_x])

        if 0:
            pl.title('threshold vs L0 coverage')
            keep = numerical_y > 0
            pl.plot(numerical_x[keep],
                    numerical_y[keep],
                    c='b',
                    alpha=0.5,
                    lw=2,
                    label='numerical')
            pl.plot(heuristic_x,
                    heuristic_y,
                    alpha=0.5,
                    c='r',
                    lw=2,
                    label='heuristic')
            pl.scatter(heuristic_x, heuristic_y, lw=0)
            pl.legend(loc='best')
            pl.show()

        # How many operating points (budgets) do we miss that the numerical
        # method achieves?
        #
        # Note that we don't expect perfect coverage because the heuristic
        # pretends that groups don't overlap.
        #
        #  ^^ We appear to be getting great coverage. Should we revise this
        #     statement?
        numerical_points = list(sorted(set(numerical_y)))
        heuristic_points = list(sorted(set(heuristic_y)))
        print('numerical:', numerical_points)
        print('heuristic:', heuristic_points)

        recall = len(set(numerical_points) & set(heuristic_points)) / len(
            set(numerical_points))
        print('recall: %.2f' % recall)

        if 0:
            # This plot is for debugging the conservativeness of the budget
            # heuristic, which is now asserted above.
            pl.title('Ability to conservatively meet the budget')
            xs, ys = list(zip(*sorted(f.items())))
            pl.plot(xs, xs, c='k', alpha=0.5, linestyle=':')
            pl.plot(xs, ys, alpha=0.5, c='r', lw=2)
            pl.scatter(xs, ys, lw=0)
            pl.show()

        print('[test budget]', colors.light.green % 'pass')
Example #39
import numpy as np
import pylab as pl

N = 1000
n = 10
np.random.seed(3)  #use always the same seed
x, y = np.random.randn(2, N) / 10 + 0.5
X, Y = np.mgrid[0:1:n * 1j, 0:1:n * 1j]

xfloor = X[:, 0][np.floor(n * x).astype(int)]
yfloor = Y[0][np.floor(n * y).astype(int)]
z = xfloor + n * yfloor
Z = X + n * Y
histo = np.histogram(z.ravel(), bins=np.r_[Z.T.ravel(), 2 * n**2])

pl.pcolor(X - 1. / (2 * n), Y - 1. / (2 * n),
          histo[0].reshape((n, n)))  # shifted to have centered bins
pl.scatter(x, y)
pl.show()
Example #40
def test_one_feature_mixture(component_model_type,
                             num_clusters=3,
                             show_plot=False,
                             seed=None):
    """

    """
    random.seed(seed)

    N = 300
    separation = .9

    get_next_seed = lambda: random.randrange(2147483647)

    cluster_weights = [[1.0 / float(num_clusters)] * num_clusters]

    cctype = component_model_type.cctype
    T, M_c, structure = sdg.gen_data([cctype],
                                     N, [0],
                                     cluster_weights, [separation],
                                     seed=get_next_seed(),
                                     distargs=[distargs[cctype]],
                                     return_structure=True)

    T_list = list(T)
    T = numpy.array(T)

    # pdb.set_trace()
    # create a crosscat state
    M_c = du.gen_M_c_from_T(T_list, cctypes=[cctype])

    state = State.p_State(M_c, T_list)

    # Get support over all component models
    discrete_support = qtu.get_mixture_support(
        cctype,
        component_model_type,
        structure['component_params'][0],
        nbins=250)

    # calculate simple predictive probability for each point
    Q = [(N, 0, x) for x in discrete_support]

    # transitions
    state.transition(n_steps=200)

    # get the sample
    X_L = state.get_X_L()
    X_D = state.get_X_D()

    # generate samples
    # kstest doesn't compute the same answer for row and column vectors,
    # so we flatten this column vector into a row vector.
    predictive_samples = sdg.predictive_columns(
        M_c, X_L, X_D, [0], seed=get_next_seed()).flatten(1)

    probabilities = su.simple_predictive_probability(M_c, X_L, X_D,
                                                     [] * len(Q), Q)

    # get histogram. Different behavior for discrete and continuous types. For some reason
    # the normed property isn't normalizing the multinomial histogram to 1.
    # T = T[:,0]
    if is_discrete[component_model_type.model_type]:
        bins = range(len(discrete_support))
        T_hist = numpy.array(qtu.bincount(T, bins=bins))
        S_hist = numpy.array(qtu.bincount(predictive_samples, bins=bins))
        T_hist = T_hist / float(numpy.sum(T_hist))
        S_hist = S_hist / float(numpy.sum(S_hist))
        edges = numpy.array(discrete_support, dtype=float)
    else:
        T_hist, edges = numpy.histogram(T,
                                        bins=min(50, len(discrete_support)),
                                        normed=True)
        S_hist, _ = numpy.histogram(predictive_samples,
                                    bins=edges,
                                    normed=True)
        edges = edges[0:-1]

    # Goodness-of-fit-tests
    if not is_discrete[component_model_type.model_type]:
        # do a KS tests if the distribution in continuous
        # cdf = lambda x: component_model_type.cdf(x, model_parameters)
        # stat, p = stats.kstest(predictive_samples, cdf)   # 1-sample test
        stat, p = stats.ks_2samp(predictive_samples, T[:, 0])  # 2-sample test
        test_str = "KS"
    else:
        # Cressie-Read power divergence statistic and goodness of fit test.
        # This function gives a lot of flexibility in the method <lambda_> used.
        freq_obs = S_hist * N
        freq_exp = numpy.exp(probabilities) * N
        stat, p = stats.power_divergence(freq_obs, freq_exp, lambda_='pearson')
        test_str = "Chi-square"

    if show_plot:
        pylab.clf()
        lpdf = qtu.get_mixture_pdf(discrete_support, component_model_type,
                                   structure['component_params'][0],
                                   [1.0 / num_clusters] * num_clusters)
        pylab.axes([0.1, 0.1, .8, .7])
        # bin widths
        width = (numpy.max(edges) - numpy.min(edges)) / len(edges)
        pylab.bar(edges,
                  T_hist,
                  color='blue',
                  alpha=.5,
                  width=width,
                  label='Original data',
                  zorder=1)
        pylab.bar(edges,
                  S_hist,
                  color='red',
                  alpha=.5,
                  width=width,
                  label='Predictive samples',
                  zorder=2)

        # plot actual pdf of support given data params
        pylab.scatter(discrete_support,
                      numpy.exp(lpdf),
                      c="blue",
                      edgecolor="none",
                      s=100,
                      label="true pdf",
                      alpha=1,
                      zorder=3)

        # plot predictive probability of support points
        pylab.scatter(discrete_support,
                      numpy.exp(probabilities),
                      c="red",
                      edgecolor="none",
                      s=100,
                      label="predictive probability",
                      alpha=1,
                      zorder=4)

        pylab.legend()

        ylimits = pylab.gca().get_ylim()
        pylab.ylim([0, ylimits[1]])

        title_string = "%i samples drawn from %i %s components: \ninference after 200 crosscat transitions\n%s test: p = %f" \
            % (N, num_clusters, component_model_type.cctype, test_str, round(p,4))

        pylab.title(title_string, fontsize=12)

        filename = component_model_type.model_type + "_mixtrue.png"
        pylab.savefig(filename)
        pylab.close()

    return p
Example #41
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    classifier.means_ = np.array(
        [X_train[y_train == i].mean(axis=0) for i in range(n_classes)])  # xrange in Python 2

    # Train the other parameters using the EM algorithm.
    classifier.fit(X_train)

    h = pl.subplot(2, n_classifiers // 2, index + 1)
    make_ellipses(classifier, h)

    for n, color in enumerate('rgb'):
        data = iris.data[iris.target == n]
        pl.scatter(data[:, 0],
                   data[:, 1],
                   0.8,
                   color=color,
                   label=iris.target_names[n])
    # Plot the test data with crosses
    for n, color in enumerate('rgb'):
        data = X_test[y_test == n]
        pl.plot(data[:, 0], data[:, 1], 'x', color=color)

    y_train_pred = classifier.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    pl.text(0.05,
            0.9,
            'Train accuracy: %.1f' % train_accuracy,
            transform=h.transAxes)

    y_test_pred = classifier.predict(X_test)
Example #42
import random, pylab

meanArrival = 60
arrivals = []
for i in range(2000):
    interArrivalTime = random.expovariate(1.0 / meanArrival)
    arrivals.append(interArrivalTime)
ave = sum(arrivals) / len(arrivals)
print('Distance from intended mean:', meanArrival - ave)
xAxis = pylab.arange(0, len(arrivals), 1)
pylab.scatter(xAxis, arrivals)
pylab.axhline(meanArrival, linewidth=4)
pylab.title('Exponential Inter-arrival Times')
pylab.ylabel('Inter-arrival Time (secs)')
pylab.xlabel('Job Number')
pylab.figure()
pylab.hist(arrivals)
pylab.title('Exponential Inter-arrival Times')
pylab.xlabel('Inter-arrival Time (secs)')
pylab.ylabel('Number of Jobs')
pylab.show()
Example #43
def plot_samples(type, data, data_ql, samples, samples_ql, var_name1,
                 var_name2, scaler, ncomp, z, path):
    print('Plot samples')
    # type:         normalised data or original data
    # data:         data from LES output
    # data_ql:      ql data from LES output
    # samples:      sample data drawn from GMM PDF
    # samples_ql:   ql data calculated from sample data
    # var_name1:    thl or s
    # var_name2:    qt
    # scaler:       scaling factors for normalisation
    # ncomp:        # of components for GMM PDF
    # path:         output path for saving figure

    # print('shapes: ', data.shape, data_ql.shape, samples.shape, samples_ql.shape)
    # print('min/max: ', np.amin(data_ql), np.amax(data_ql), np.amin(samples_ql), np.amax(samples_ql))

    # ql_min = np.min([np.amin(data_ql), np.amin(samples_ql)])
    ql_min = 0.0
    ql_max = np.max([np.amax(data_ql), np.amax(samples_ql)])

    scale_thl = scaler.scale_[0]
    scale_qt = scaler.scale_[1]
    xmin = np.min([np.amin(data[:, 0]), np.amin(samples[:, 0])])
    xmax = np.max([np.amax(data[:, 0]), np.amax(samples[:, 0])])
    ymin = np.min([np.amin(data[:, 1]), np.amin(samples[:, 1])])
    ymax = np.max([np.amax(data[:, 1]), np.amax(samples[:, 1])])
    # cm = plt.cm.get_cmap('RdYlBu')   # overridden below; viridis is used
    cm = plt.cm.get_cmap('viridis')

    plt.figure(figsize=(8, 9))
    plt.subplot(2, 2, 1)
    plt.scatter(data[:, 0], data[:, 1], s=5, alpha=0.2)
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    if type == 'norm':
        plt.title('data norm (n=' + str(np.shape(data)[0]) + ')')
    else:
        plt.title('data (n=' + str(np.shape(data)[0]) + ')')
    labeling(var_name1, var_name2, scale_thl, scale_qt)

    plt.subplot(2, 2, 2)
    plt.scatter(samples[:, 0], samples[:, 1], s=5, alpha=0.2)
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.title('samples (n=' + str(np.size(samples_ql)) + ')')
    labeling(var_name1, var_name2, 0, 0)

    plt.subplot(2, 2, 3)
    try:
        ax = plt.scatter(data[:, 0],
                         data[:, 1],
                         c=data_ql[:],
                         s=6,
                         alpha=0.5,
                         edgecolors='none',
                         vmin=ql_min,
                         vmax=ql_max)  #, cmap = cm)
        if np.amax(data_ql[:]) > 0.0:
            plt.colorbar(ax, shrink=0.6)
    except:
        print('except data - color arr: ', data.shape, data_ql.shape)
        # traceback.print_exc()
        color_arr = np.zeros(shape=np.size(data_ql))
        for i in range(np.size(data_ql)):
            color_arr[i] = data_ql[i] * 1e3
        print('data_ql except', np.amin(data_ql), np.amax(data_ql),
              np.amin(color_arr), np.amax(color_arr), np.shape(color_arr))
        print(color_arr.shape)
        print(color_arr)
        try:
            ax = plt.scatter(data[:, 0],
                             data[:, 1],
                             c=color_arr[:],
                             s=6,
                             alpha=0.5,
                             edgecolors='none',
                             vmin=ql_min,
                             vmax=ql_max)  # , cmap = cm)
            if np.amax(data_ql[:]) > 0.0:
                plt.colorbar(ax, shrink=0.6)
        except:
            # traceback.print_exc()
            print('except except (data ql)')
            ax = plt.scatter(data[:, 0],
                             data[:, 1],
                             s=6,
                             alpha=0.5,
                             edgecolors='none',
                             vmin=ql_min,
                             vmax=ql_max)  # , cmap = cm)
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    if type == 'norm':
        plt.title('data norm (n=' + str(np.shape(data)[0]) + ')')
    else:
        plt.title('data (n=' + str(np.shape(data)[0]) + ')')
    labeling(var_name1, var_name2, scale_thl, scale_qt)

    plt.subplot(2, 2, 4)
    color_arr = np.zeros(shape=np.size(samples_ql))
    for i in range(np.size(samples_ql)):
        color_arr[i] = samples_ql[i] * 1e3
    print('samples_ql ', np.amin(samples_ql), np.amax(samples_ql),
          np.amin(color_arr), np.amax(color_arr), np.shape(color_arr))
    try:
        print('try')
        # ax = plt.scatter(samples[:, 0], samples[:, 1], c=color_arr[:], s=6, alpha=0.5, edgecolors='none')
        ax = plt.scatter(samples[:, 0],
                         samples[:, 1],
                         c=samples_ql[:],
                         s=6,
                         alpha=0.5,
                         edgecolors='none',
                         vmin=ql_min,
                         vmax=ql_max)  #, cmap = cm)
        if np.amax(data_ql[:]) > 0.0:
            plt.colorbar(ax, shrink=0.6)
    except:
        try:
            print('except-try')
            ax = plt.scatter(samples[:, 0],
                             samples[:, 1],
                             c=color_arr[:],
                             s=6,
                             alpha=0.5,
                             edgecolors='none',
                             vmin=ql_min,
                             vmax=ql_max)  #, cmap = cm)
            # ax = plt.scatter(samples[:, 0], samples[:, 1], c=samples_ql[:], s=6, alpha=0.5, edgecolors='none')
            if np.amax(data_ql[:]) > 0.0:
                plt.colorbar(ax, shrink=0.6)
        except:
            print('except-except')
            ax = plt.scatter(samples[:, 0],
                             samples[:, 1],
                             s=6,
                             alpha=0.5,
                             edgecolors='none')  #, cmap = cm)
            # pass
            # pass

    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.title('samples (n=' + str(np.size(samples_ql)) + ')')
    labeling(var_name1, var_name2, 0, 0)

    # plt.subplot(2, 3, 6)
    # color_arr = np.zeros(shape=np.shape(data_ql))
    # for i in range(np.shape(data_ql)[0]):
    #     color_arr[i] = data_ql[i] * 1e4
    # print('data_ql. ', np.amin(data_ql), np.amax(data_ql), np.amin(color_arr), np.amax(color_arr))
    # plt.scatter(data[:, 0], data[:, 1], c=color_arr[:], s=5, alpha=0.2)
    # # plt.gray()
    # if type == 'norm':
    #     plt.title('data norm')
    # else:
    #     plt.title('data')
    # labeling(var_name1, var_name2, scale_thl, scale_qt)

    plt.suptitle('Data & Samples: ncomp=' + str(ncomp) + ', z=' + str(z) + 'm',
                 fontsize=18)
    if type == 'norm':
        savename = 'sample_figure_' + 'ncomp' + str(ncomp) + '_norm_' + str(
            z) + 'm.png'
    else:
        savename = 'sample_figure_' + 'ncomp' + str(ncomp) + '_' + str(
            z) + 'm.png'
    plt.savefig(os.path.join(path, 'CloudClosure_figures', savename))
    plt.close()
    return
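
The essential pattern in plot_samples, two scatters coloured by a third variable on a shared vmin/vmax scale so their colorbars are directly comparable, reduces to a short sketch (synthetic data here, not the LES fields the function expects):

import numpy as np
import matplotlib.pyplot as plt

a, b = np.random.randn(500, 3), np.random.randn(500, 3)
vmin = 0.0                                # plays the role of ql_min above
vmax = max(a[:, 2].max(), b[:, 2].max())  # plays the role of ql_max above
for k, pts in enumerate((a, b), start=1):
    plt.subplot(1, 2, k)
    sc = plt.scatter(pts[:, 0], pts[:, 1], c=pts[:, 2], s=6,
                     alpha=0.5, edgecolors='none', vmin=vmin, vmax=vmax)
    plt.colorbar(sc, shrink=0.6)
plt.show()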
Exemple #44
0
        elif args.DataSetName == "batvsuper":
            classify_twitter_data(file_name="redmi.txt")
        elif args.DataSetName == "junglebook":
            classify_twitter_data(file_name="iphone.txt")
        elif args.DataSetName == "zootopia":
            classify_twitter_data(file_name="googlepixel.txt")
        elif args.DataSetName == "deadpool":
            classify_twitter_data(file_name="oneplus.txt")
        else:
            print("ERROR while specifying Movie Tweets File, please check the name again")

pca = PCA(n_components=2).fit(data_X)
pca_2d = pca.transform(data_X)
svmClassifier_2d = svm.LinearSVC(random_state=111).fit(pca_2d, data_Y)
for i in range(0, pca_2d.shape[0]):
    # float('negative') always raises ValueError; compare the labels directly
    if data_Y[i] == 'negative':
        c1 = pl.scatter(pca_2d[i, 0], pca_2d[i, 1], c='r', s=50, marker='+')
    elif data_Y[i] == 'positive':
        c2 = pl.scatter(pca_2d[i, 0], pca_2d[i, 1], c='g', s=50, marker='o')
    elif data_Y[i] == 'neutral':
        c3 = pl.scatter(pca_2d[i, 0], pca_2d[i, 1], c='b', s=50, marker='*')
pl.legend([c1, c2, c3], ['Negative', 'Positive', 'Neutral'])
x_min, x_max = pca_2d[:, 0].min() - 1,   pca_2d[:,0].max() + 1
y_min, y_max = pca_2d[:, 1].min() - 1,   pca_2d[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .01),   np.arange(y_min, y_max, .01))
Z = svmClassifier_2d.predict(np.c_[xx.ravel(),  yy.ravel()])
Z = Z.reshape(xx.shape)
pl.contour(xx, yy, Z)
pl.title('Support Vector Machine Decision Surface')
pl.axis('off')
pl.show()
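
The meshgrid/contour decision-surface idiom above works on any 2-D classifier; a self-contained sketch (synthetic blobs stand in for the tweet features, which are not available here):

import numpy as np
import pylab as pl
from sklearn import svm
from sklearn.datasets import make_blobs

X2d, y2d = make_blobs(n_samples=150, centers=3, random_state=0)
clf2d = svm.LinearSVC(random_state=111).fit(X2d, y2d)
xx, yy = np.meshgrid(np.arange(X2d[:, 0].min() - 1, X2d[:, 0].max() + 1, .05),
                     np.arange(X2d[:, 1].min() - 1, X2d[:, 1].max() + 1, .05))
Z = clf2d.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
pl.contour(xx, yy, Z)
pl.scatter(X2d[:, 0], X2d[:, 1], c=y2d)
pl.show()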
Exemple #45
0
    def draw(self, W, data=[], C=[], file_name=1):
        if file_name != 1:
            pl.figure(figsize=(14, 6))
            colValue = ['y', 'g', 'b', 'c', 'k', 'm', 'peru', 'darkorchid']

            # clustering plot
            pl.subplot(1,2,1)
            # plain (unclassified) samples
            if len(data) != 0:
                pl.plot(data[:,0], data[:,1], 'ko', markersize="5")

            # classified samples
            if len(C) != 0:
                for i in range(len(C)):
                    coo_X = []  # list of x coordinates
                    coo_Y = []  # list of y coordinates
                    for j in range(len(C[i])):
                        coo_X.append(C[i][j][0])
                        coo_Y.append(C[i][j][1])
                    pl.scatter(coo_X, coo_Y, marker='o', color=colValue[i % len(colValue)], label=i)

                pl.legend(loc='upper right')

            # neurons
            pl.plot(W[:,0], W[:,1], "ro", marker='o', markersize="7")
            for index in range(self.output[0]*self.output[1]):
                for i in range(self.output[0]*self.output[1]):
                    dist = self.Manhattan_dist(i, index)
                    if dist == 1: 
                        pl.plot(W[[i,index],0], W[[i,index],1], "r")
            
            pl.xlabel('X')
            pl.ylabel('Y')
            pl.title('SOM')
            pl.xlim(-5, 25)
            pl.ylim(-5, 25)

            # neighbourhood-radius plot
            pl.subplot(2,2,2)
            pl.title('Radius of Neighborhood')
            pl.ylabel('Manhattan Distance')
            x = np.linspace(0, self.iteration, self.iteration+1)
            y = np.power(1.115, -x) * 4
            pl.plot(x,y)
            font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 18}
            pl.text(20,3.5,r'$D = 4 \times 1.115^{-t}$', font1)
            pl.plot(self.count,  np.power(1.115, -self.count) * 4, 'go', markersize="12")

            # plot the learning rate
            pl.subplot(2,2,4)
            pl.title('Learning Rate')
            pl.xlabel('Iteration')
            pl.ylabel('Rate')
            x = np.linspace(0, self.iteration, self.iteration+1)
            y = np.power(np.e, 0) / (x + 40)
            pl.plot(x,y)
            pl.text(20,0.024,'Batch Size=100, D=0', font1)
            pl.text(20,0.022, r'$R = e^{-D} / (t + 40)$', font1)

            pl.plot(self.count, np.power(np.e, 0) / (self.count + 40), 'go', markersize="12")
            # save the figure
            pl.savefig(file_name, dpi=300)
            pl.close()
        else:
            pl.figure(figsize=(22, 10))
            colValue = ['y', 'g', 'b', 'c', 'k', 'm', 'peru', 'darkorchid']

            # clustering plot
            pl.subplot(1,2,1)
            # plain (unclassified) samples
            if len(data) != 0:
                pl.plot(data[:,0], data[:,1], 'ko', markersize="5")

            # classified samples
            if len(C) != 0:
                for i in range(len(C)):
                    coo_X = []  # list of x coordinates
                    coo_Y = []  # list of y coordinates
                    for j in range(len(C[i])):
                        coo_X.append(C[i][j][0])
                        coo_Y.append(C[i][j][1])
                    pl.scatter(coo_X, coo_Y, marker='o', color=colValue[i % len(colValue)], label=i)

                pl.legend(loc='upper right')

            # neurons
            pl.plot(W[:,0], W[:,1], "ro", marker='o', markersize="7")
            for index in range(self.output[0]*self.output[1]):
                for i in range(self.output[0]*self.output[1]):
                    dist = self.Manhattan_dist(i, index)
                    if dist == 1: 
                        pl.plot(W[[i,index],0], W[[i,index],1], "r")
            
            pl.xlabel('X')
            pl.ylabel('Y')
            pl.title('SOM')
            pl.xlim(-5, 25)
            pl.ylim(-5, 25)
            ###########################################################################
            num_tmp = 1000
            for i in range(len(C)):
                for j in range(len(C[i])):
                    num_tmp = min([len(C[i][j]), num_tmp])
            pl.subplot(3,6,4)
            pl.title("Cluster 0 (n=50)")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[0,:].A[0], 'ko-')
            pl.subplot(3,6,5)
            pl.title("Cluster 1 (n=0)")
            pl.ylim(-1,25)

            pl.subplot(3,6,6)
            pl.title("Cluster 2 (n=50)")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[2,:].A[0], 'ko-')
            pl.subplot(3,6,10)
            pl.title("Cluster 3 (n=" + str(50-num_tmp) + ")")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[3,:].A[0], 'ko-')
            pl.subplot(3,6,11)
            pl.title("Cluster 4 (n=" + str(num_tmp) + ")")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[4,:].A[0], 'ko-')
            pl.subplot(3,6,12)
            pl.title("Cluster 5 (n=0)")
            pl.ylim(-1,25)

            pl.subplot(3,6,16)
            pl.title("Cluster 6 (n=50)")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[6,:].A[0], 'ko-')
            pl.subplot(3,6,17)
            pl.title("Cluster 7 (n=50)")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[7,:].A[0], 'ko-')
            pl.subplot(3,6,18)
            pl.title("Cluster 8 (n=50)")
            pl.ylim(-1,25)
            pl.plot([1,2],self.W[8,:].A[0], 'ko-')
            # save the figure
            pl.savefig('./fig5/fig36.png', dpi=300)
            pl.show()
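
The two annotated schedules, neighbourhood radius D = 4 * 1.115**(-t) and learning rate R = exp(-D) / (t + 40), can be previewed independently of the SOM; a sketch assuming 1000 iterations and D = 0, as in the text labels above:

import numpy as np
import pylab as pl

t = np.linspace(0, 1000, 1001)
pl.subplot(1, 2, 1)
pl.plot(t, 4 * np.power(1.115, -t))
pl.title('Radius of Neighborhood')
pl.subplot(1, 2, 2)
pl.plot(t, np.exp(-0.0) / (t + 40))  # D = 0, matching the annotation
pl.title('Learning Rate')
pl.show()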
Exemple #46
0
def plot_PDF(data, data_norm, var_name1, var_name2, nvar, clf, scaler, ncomp,
             error, z, path):
    print('Plot PDF: ncomp=' + str(ncomp) + ', z=' + str(z))

    xmin = np.amin(data_norm[:, 0])
    xmax = np.amax(data_norm[:, 0])
    ymin = np.amin(data_norm[:, 1])
    ymax = np.amax(data_norm[:, 1])

    n_sample = int(1e2)  # np.int was removed from NumPy; use the builtin
    x_ = np.linspace(xmin, xmax, n_sample)
    y_ = np.linspace(ymin, ymax, n_sample)
    XX_ = np.ndarray(shape=(n_sample**nvar, nvar))

    # (1) print PDF computed by GMM
    delta_i = n_sample
    for i in range(n_sample):
        for j in range(n_sample):
            shift = i * delta_i + j
            XX_[shift, 0] = x_[i]
            XX_[shift, 1] = y_[j]
    ZZ_ = clf.score_samples(XX_)
    ZZ = np.ndarray(shape=(n_sample, n_sample))
    for j in range(n_sample**nvar):
        jshift = np.mod(j, delta_i)
        ishift = (j - jshift) // delta_i  # integer division: ishift indexes ZZ
        ZZ[ishift, jshift] = ZZ_[j]

    # XXii = np.zeros(shape=XX_.shape)
    # print('XX before: ', ZZ.shape, XX_.shape)
    # print(XX_[0:3, 0])
    # print(XX_[0:3, 1])
    # print('XX after inverse')
    # XXi = scaler.inverse_transform(XX_)
    # print(XXi[0:3,0])
    # print(XXi[0:3, 1])
    # XXii[:,0] = XX_[:,0]/scaler.scale_[0]
    # XXii[:, 1] = XX_[:, 1]/ scaler.scale_[1]
    # print(scaler.scale_)
    # print('XX after rescale')
    # print(XXii[0:3, 0])
    # print(XXii[0:3, 1])

    # (2) compute normal distribution from parameters given from GMM
    # X_ = np.ndarray()
    # Z_pred =

    plt.figure(figsize=(16, 8))
    plt.subplot(1, 4, 1)
    plt.scatter(data[:, 0], data[:, 1], s=5, alpha=0.2)
    plt.title('data')
    labeling(var_name1, var_name2, 1, 1)
    plt.xlim(np.amin(data[:, 0]), np.amax(data[:, 0]))
    plt.ylim(np.amin(data[:, 1]), np.amax(data[:, 1]))

    plt.subplot(1, 4, 2)
    ax = plt.contour(x_, y_, np.exp(ZZ).T)
    plt.scatter(data_norm[:, 0], data_norm[:, 1], s=5, alpha=0.2)
    plt.colorbar(ax)
    plt.title('data')
    labeling(var_name1, var_name2, scaler.scale_[0], scaler.scale_[1])
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)

    plt.subplot(1, 4, 3)
    ax = plt.contourf(x_, y_, np.exp(ZZ).T)
    plt.colorbar(ax)
    # plt.scatter(data_norm[:, 0], data_norm[:, 1], s=5, alpha=0.2)
    plt.title('PDF(thl, qt)')
    labeling(var_name1, var_name2, scaler.scale_[0], scaler.scale_[1])
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)

    plt.subplot(1, 4, 4)
    try:
        ax = plt.contourf(x_, y_, np.exp(ZZ).T, norm=LogNorm())
        plt.colorbar(ax)
    except:
        print('except in: plot_PDF, subplot(1,4,3)')
    # plt.scatter(data_norm[:, 0], data_norm[:, 1], s=5, alpha=0.2)
    plt.title('PDF(thl, qt)')
    labeling(var_name1, var_name2, scaler.scale_[0], scaler.scale_[1])
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)

    plt.suptitle('GMM, ncomp=' + str(ncomp) + ', error: ' + str(error) +
                 '    (z=' + str(z) + ')')

    print('')
    print(
        ncomp, z,
        os.path.join(
            path, 'CloudClosure_figures',
            'PDF_figures_' + str(z) + 'm' + '_ncomp' + str(ncomp) + '.png'))
    try:
        plt.savefig(
            os.path.join(
                path, 'CloudClosure_figures', 'PDF_figures_' + str(z) + 'm' +
                '_ncomp' + str(ncomp) + '.png'))
    except:
        print('!!!!! figure with ncomp=' + str(ncomp) + ', z=' + str(z) +
              ' not saved !!!!!')
    # plt.show()
    plt.close()
    return
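
The double loop that fills XX_ and refolds ZZ_ is a meshgrid/ravel/reshape round trip. A sketch of that idiom, assuming any fitted model exposing score_samples (here sklearn's GaussianMixture on random data):

import numpy as np
from sklearn.mixture import GaussianMixture

gmm = GaussianMixture(n_components=2).fit(np.random.randn(500, 2))
n = 100
x_ = np.linspace(-3, 3, n)
y_ = np.linspace(-3, 3, n)
Xg, Yg = np.meshgrid(x_, y_, indexing='ij')      # Xg[i, j] = x_[i], as in the loop
XX_ = np.column_stack([Xg.ravel(), Yg.ravel()])  # row i*n+j holds (x_[i], y_[j])
ZZ = gmm.score_samples(XX_).reshape(n, n)        # same layout the loop rebuilds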
Exemple #47
0
    xaxisthetis1.append(i)
    bathymetrythetis1.append(-solver_obj.fields.bathymetry_2d.at([i, 0.55]))

# plot thetis results against sisyphe and experiment
from matplotlib import colors as mcolors
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)

plt.rc('font', family='Helvetica')

data = pd.read_excel('data/experimental_data.xlsx',
                     sheet_name='recreatepaperrun',
                     header=None)
diff_sisyphe = pd.read_excel('data/sisyphe_results.xlsx')
thetisdf = pd.read_csv('model_outputs/trench_bed_output.csv')

plt.scatter(data[0], data[1], label='Experimental Data')

plt.plot(thetisdf['x'],
         thetisdf['bathymetry'],
         '--',
         linewidth=3,
         c=colors['darkgreen'],
         label=r'Thetis ($\Delta x = 0.2m$)')
plt.plot(xaxisthetis1,
         bathymetrythetis1,
         colors['mediumblue'],
         label=r'Thetis ($\Delta x = 0.5m$)')
plt.plot(diff_sisyphe['x'][diff_sisyphe['y'] == 0.55],
         -diff_sisyphe['Sisyphe'][diff_sisyphe['y'] == 0.55],
         color=colors['orange'],
         label='Sisyphe')
Exemple #48
0
def plot_drawn_pos(ANTENNAS_i,ANTENNAS_sel,pos_tab,output_folder,task,DISPLAY):
    #ANTENNAS_i are the antenna positions corrected by the position of the intersection between the shower axis and the ground
    #ANTENNAS_sel are the selected antenna positions
    #CORE is the random position
    fig, ax = pl.subplots()
    pl.scatter(ANTENNAS_i[:,0]+pos_tab[-1,0],ANTENNAS_i[:,1]+pos_tab[-1,1],c='b',edgecolors='none') #arrays are moved back around (0,0,GdAlt)
    pl.scatter(ANTENNAS_sel[:,0]+pos_tab[-1,0],ANTENNAS_sel[:,1]+pos_tab[-1,1],c='g',edgecolors='none') #arrays are moved back around (0,0,GdAlt)
    pl.scatter(pos_tab[:,0],pos_tab[:,1],c='k',edgecolors='none')
    pl.scatter(pos_tab[-1,0],pos_tab[-1,1],c='r',edgecolors='none')
    figname = output_folder+'/inp/fig/plots_'+task+'.png'
    pl.savefig(figname,dpi=350)
    if DISPLAY:
        pl.show()
    pl.close()

    fig, ax = pl.subplots()
    pl.scatter(ANTENNAS_i[:,0],ANTENNAS_i[:,1],c='b',edgecolors='none') #arrays in their original frame, centred on (0,0,GdAlt)
    pl.scatter(ANTENNAS_sel[:,0],ANTENNAS_sel[:,1],c='g',edgecolors='none') #arrays in their original frame, centred on (0,0,GdAlt)
    pl.scatter(0.,0.,c='r',edgecolors='none')
    figname = output_folder+'/inp/fig/plots_'+task+'_real.png'
    pl.savefig(figname,dpi=350)
    if DISPLAY:
        pl.show()
    pl.close()
    return
def plot():
	print "Load tracking.txt"
	d = numpy.genfromtxt("tracking.txt", names=True)
	n = len(d)

	# one per particle
	print "Find unique inital particles"
	ids_source = unique_ids(d['ID0'], d['E0'], d['X0'], d['Y0'], d['Z0'], d['P0x'], d['P0y'], d['P0z'], d['ID1'], d['E1'], d['X1'], d['Y1'], d['Z1'], d['P1x'], d['P1y'], d['P1z'])
	nids_source = ids_source.max() + 1
	print "Unique Source: ", nids_source

	# one per initial
	print "Find unique final particles"
	ids_particle = unique_ids(d['ID0'], d['E0'], d['X0'], d['Y0'], d['Z0'], d['P0x'], d['P0y'], d['P0z'])
	nids_particle = ids_particle.max() + 1
	print "Unique Particle: ", nids_source

	# collect data
	print "Detect at different distances"
	select_1000kpc = numpy.ones(nids_source, dtype=int)	* -1
	select_500kpc = numpy.ones(nids_source, dtype=int)	* -1
	select_200kpc = numpy.ones(nids_source, dtype=int)	* -1
	select_100kpc = numpy.ones(nids_source, dtype=int)	* -1
	select_50kpc = numpy.ones(nids_source, dtype=int)	* -1
	select_closest = numpy.ones(nids_source, dtype=int)	* -1

	dist = ((d['X'] - 64)**2 + (d['Y'] - 64)**2 + (d['Z'] - 64)**2)**0.5

	for i in range(nids_source):
		if i % 1000 == 0:
			sys.stdout.write(" %5.2f%%\r" % (100. * float(i) / nids_source))
			sys.stdout.flush()
		# select i'th initial particle
		s = numpy.nonzero(ids_source == i)[0]
		s_d = d[s]
		s_dist = dist[s]

		# distances sorted by trajectory
		D_sort = numpy.argsort(s_d['D'])
		dist_sort = s_dist[D_sort]

		idx = numpy.nonzero(dist_sort < 1.000)[0]
		if len(idx) > 0:
			select_1000kpc[i] = s[D_sort[idx[0]]]
		idx = numpy.nonzero(dist_sort < 0.500)[0]
		if len(idx) > 0:
			select_500kpc[i] = s[D_sort[idx[0]]]
		idx = numpy.nonzero(dist_sort < 0.200)[0]
		if len(idx) > 0:
			select_200kpc[i] = s[D_sort[idx[0]]]
		idx = numpy.nonzero(dist_sort < 0.100)[0]
		if len(idx) > 0:
			select_100kpc[i] = s[D_sort[idx[0]]]
		idx = numpy.nonzero(dist_sort < 0.05)[0]
		if len(idx) > 0:
			select_50kpc[i] = s[D_sort[idx[0]]]

		select_closest[i] = s[numpy.argmin(s_dist)]

	# plot first 10 tracks
	first_ten = ids_source < 10
	pylab.figure()
	pylab.scatter(d['X'][first_ten], d['Y'][first_ten], c=ids_source[first_ten], s=50*(d['Z'][first_ten]-63))

	first_ten_1000kpc = select_1000kpc[:10]
	pylab.scatter(d['X'][first_ten_1000kpc], d['Y'][first_ten_1000kpc], marker='+', s=1000)

	first_ten_500kpc = select_500kpc[:10]
	pylab.scatter(d['X'][first_ten_500kpc], d['Y'][first_ten_500kpc], marker='+', s=500)

	first_ten_200kpc = select_200kpc[:10]
	pylab.scatter(d['X'][first_ten_200kpc], d['Y'][first_ten_200kpc], marker='+', s=200)

	first_ten_100kpc = select_100kpc[:10]
	pylab.scatter(d['X'][first_ten_100kpc], d['Y'][first_ten_100kpc], marker='+', s=100)

	first_ten_50kpc = select_50kpc[:10]
	pylab.scatter(d['X'][first_ten_50kpc], d['Y'][first_ten_50kpc], marker='+', s=50)

	first_ten_closest = select_closest[:10]
	pylab.scatter(d['X'][first_ten_closest], d['Y'][first_ten_closest], marker='x', s=50)

	pylab.xlim(63, 65)
	pylab.ylim(63, 65)
	pylab.savefig("scatter.png")
	pylab.show()
	pylab.close()

	# plot skplots
	x, y, z = -d['Px'], -d['Py'], -d['Pz']
	phi = numpy.arctan2(y, x)
	theta = numpy.arctan2(z, (x * x + y * y) ** .5)

	plot_uhecrs(phi, theta, dist, cmap='jet_r')
	pylab.savefig("all.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi, theta, 1./dist)
	pylab.savefig("all_w.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi, theta, 1./(dist**2))
	pylab.savefig("all_w2.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_1000kpc], theta[select_1000kpc], None)
	pylab.savefig("1000kpc.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_500kpc], theta[select_500kpc], None)
	pylab.savefig("500kpc.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_200kpc], theta[select_200kpc], None)
	pylab.savefig("200kpc.png")
	pylab.show()
	pylab.close()


	plot_uhecrs(phi[select_100kpc], theta[select_100kpc], None)
	pylab.savefig("100kpc.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_50kpc], theta[select_50kpc], None)
	pylab.savefig("50kpc.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_closest], theta[select_closest], None)
	pylab.savefig("closest.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_closest], theta[select_closest], 1./dist[select_closest])
	pylab.savefig("closest_w.png")
	pylab.show()
	pylab.close()

	plot_uhecrs(phi[select_closest], theta[select_closest], 1./(dist[select_closest]**2))
	pylab.savefig("closest_w2.png")
	pylab.show()
	pylab.close()
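
The sky coordinates use the standard unit-vector convention, phi = arctan2(y, x) and theta = arctan2(z, sqrt(x**2 + y**2)); a quick sanity check with an illustrative vector:

import numpy as np

v = np.array([1.0, 1.0, np.sqrt(2.0)])         # 45 deg azimuth, 45 deg altitude
phi = np.arctan2(v[1], v[0])
theta = np.arctan2(v[2], np.hypot(v[0], v[1]))
print(np.degrees([phi, theta]))                # -> [45. 45.]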
Exemple #50
0
if __name__ == '__main__':
    listener()
    vel_rect = np.array(vel_rect)
    vel_rect_x = np.array(vel_rect_x)
    vel_rect_y = np.array(vel_rect_y)
    vel1 = np.array(vel1)
    vel1_x = np.array(vel1_x)
    vel1_y = np.array(vel1_y)
    arr_rect = np.array(arr_rect)
    arr1 = np.array(arr1)
    # print ("vel1:",(vel1))
    # print ("vel_rect:",vel_rect)
    pl.plot(vel_rect_x[:, 0], vel_rect_x[:, 1], color='b')
    pl.plot(vel1_x[:, 0], vel1_x[:, 1], color='r')
    pl.title('vel_x')
    pl.figure()
    pl.plot(vel_rect_y[:, 0], vel_rect_y[:, 1], color='b')
    pl.plot(vel1_y[:, 0], vel1_y[:, 1], color='r')
    pl.title('vel_y')

    pl.figure()
    pl.plot(vel_rect[:, 0], vel_rect[:, 1], color='b')
    pl.plot(vel1[:, 0], vel1[:, 1], color='r')
    pl.title('vel_mod')
    # pl.show()
    pl.figure()
    pl.scatter(arr1[:, 0], arr1[:, 1], color='r')
    pl.scatter(arr_rect[:, 0], arr_rect[:, 1], color='b')
    pl.title('pos')
    # pl.show()
    # support vectors
    margin = 1 / np.sqrt(np.sum(clf.coef_**2))
    yy_down = yy + a * margin
    yy_up = yy - a * margin

    # plot the line, the points, and the nearest vectors to the plane
    pl.figure(fignum, figsize=(4, 3))
    pl.clf()
    pl.set_cmap(pl.cm.Paired)
    pl.plot(xx, yy, 'k-')
    pl.plot(xx, yy_down, 'k--')
    pl.plot(xx, yy_up, 'k--')

    pl.scatter(clf.support_vectors_[:, 0],
               clf.support_vectors_[:, 1],
               s=80,
               facecolors='none',
               zorder=10)
    pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10)

    pl.axis('tight')
    x_min = -4.8
    x_max = 4.2
    y_min = -6
    y_max = 6

    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
Exemple #52
0
Ordinary Least Squares with SGD
===============================

Simple Ordinary Least Squares example with stochastic
gradient descent, we draw the linear least
squares solution for a random set of points in the plane.
"""
print(__doc__)

import pylab as pl

from sklearn.linear_model import SGDRegressor
from sklearn.datasets import make_regression  # samples_generator was removed in newer scikit-learn

# this is our test set, it's just a straight line with some
# gaussian noise
X, Y = make_regression(n_samples=100,
                       n_features=1,
                       n_informative=1,
                       random_state=0,
                       noise=35)

# fit the regressor
clf = SGDRegressor(alpha=0.1, max_iter=20)  # n_iter was renamed max_iter in scikit-learn 0.20
clf.fit(X, Y)

# and plot the result
pl.scatter(X, Y, color='black')
pl.plot(X, clf.predict(X), color='blue', linewidth=3)
pl.show()
Exemple #53
0
def line_search(rays, K_ne, m_tci, i0, gradient, g, dobs, CdCt, figname=None):
    M = m_tci.get_shaped_array()
    #g = forward_equation_dask(rays,K_ne,m_tci,i0)
    dd = (g - dobs)**2 / (CdCt + 1e-15)
    S0 = np.sum(dd) / 2.
    ep_a = []
    S_a = []
    S = S0
    #initial epsilon_n
    dd = (g - dobs) / (CdCt + 1e-15)
    ep = 1e-3
    g_ = forward_equation(
        rays, K_ne,
        TriCubic(m_tci.xvec, m_tci.yvec, m_tci.zvec, M - ep * gradient), i0)
    Gm = (g - g_) / ep
    #numerator
    dd *= Gm
    numerator = 2. * np.sum(dd)
    #denominator
    Gm *= Gm
    Gm /= (CdCt + 1e-15)
    denominator = np.sum(Gm)
    epsilon_n0 = np.abs(numerator / denominator)
    epsilon_n = epsilon_n0
    iter = 0
    while S >= S0 or iter < 3:
        epsilon_n /= 2.
        #m_tci.m = m - epsilon_n*gradient.ravel('C')
        g = forward_equation(
            rays, K_ne,
            TriCubic(m_tci.xvec, m_tci.yvec, m_tci.zvec,
                     M - epsilon_n * gradient), i0)
        #print(np.mean(g),np.var(g))
        dd = (g - dobs)**2 / (CdCt + 1e-15)
        S = np.sum(dd) / 2.
        ep_a.append(epsilon_n)
        S_a.append(S)
        #print(epsilon_n,S)
        if not np.isnan(S):
            if S < 1 << 64:
                iter += 1
    epsilon_n, S_p = vertex(*ep_a[-3:], *S_a[-3:])

    g = forward_equation_dask(
        rays, K_ne,
        TriCubic(m_tci.xvec, m_tci.yvec, m_tci.zvec, M - epsilon_n * gradient),
        i0)
    dd = (g - dobs)**2 / (CdCt + 1e-15)
    S = np.sum(dd) / 2.
    print("S0: {} | Estimated epsilon_n: {}".format(S0, epsilon_n0))
    print("Parabolic minimum | epsilon_n = {}, S = {}".format(epsilon_n, S_p))
    print("Actual | S = {}".format(S))
    print("Misfit Reduction: {:.2f}%".format(S / S0 * 100. - 100.))
    if figname is not None:
        plt.plot(ep_a, S_a)
        plt.scatter(epsilon_n, S, c='green', label='Final misfit')
        #plt.scatter(epsilon_n,S_p,c='red',label='Parabolic minimum')
        plt.yscale('log')
        plt.plot([min(epsilon_n, np.min(ep_a)),
                  max(epsilon_n, np.max(ep_a))], [S0, S0],
                 ls='--',
                 c='red')
        plt.xscale('log')
        plt.legend(frameon=False)
        plt.savefig("{}.png".format(figname), format='png')

    return epsilon_n, S, (S / S0 - 1.)
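
The vertex helper called above is not defined in this excerpt; a standard three-point parabolic fit consistent with the call vertex(*ep_a[-3:], *S_a[-3:]) is sketched below (it assumes distinct abscissae):

import numpy as np

def vertex(x1, x2, x3, y1, y2, y3):
    # Coefficients of y = a*x**2 + b*x + c through the three points
    denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
    a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom
    b = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom
    c = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2
         + x1 * x2 * (x1 - x2) * y3) / denom
    # Stationary point of the parabola: (x, y) at the minimum/maximum
    return -b / (2 * a), c - b**2 / (4 * a)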
Exemple #54
0
        iY = momentum * iY - eta * (gains * dY)
        Y = Y + iY
        Y = Y - np.tile(np.mean(Y, 0), (n, 1))

        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = np.sum(P * np.log(P / Q))
            print("Iteration %d: error is %f" % (iter + 1, C))

        # Stop lying about P-values
        if iter == 100:
            P = P / 4.

    # Return solution
    return Y


if __name__ == "__main__":
    print(
        "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
    )
    print("Running example on 2,500 MNIST digits...")
    X = np.load("signatures.npy")
    labels = np.load("labels_name.npy")
    num_label = np.squeeze(labels)
    print("labels:", labels)
    print("numlabels:", num_label)
    Y = tsne(X, 2, 50, 20.0)
    pylab.scatter(Y[:, 0], Y[:, 1], 20, num_label)  # scatter(x, y, s, c): fixed size, colour by label
    pylab.show()
Exemple #55
0
data_ols = np.column_stack((y1,x1,x2))
np.savetxt('OLS.csv',data_ols,delimiter = ",")
print(data_ols[1:10])


## Data Visualization ##

pylab.plot(samples1[:, 0], samples1[:, 1],'*', color = 'red')
pylab.plot(samples2[:, 0], samples2[:, 1],'o',color = 'blue')
pylab.plot(samples3[:, 0], samples3[:, 1],'+',color = 'green')
pylab.show()
pylab.plot(x1)
pylab.plot(x2, color='red')
pylab.plot(y1, color = 'orange')
pylab.show()
pylab.scatter(y1,x1, color='blue')
pylab.scatter(y1,x2, color = 'red')
pylab.scatter(x1,x2, color = 'orange')
pylab.show()
m1, b1 = np.polyfit(x1, y1, 1)
pylab.plot(x1, y1, '.', color = 'blue')
pylab.plot(x1, m1*x1 + b1, '-')
pylab.show()
m2, b2 = np.polyfit(x2, y1, 1)
pylab.plot(x2, y1, '.', color = 'red')
pylab.plot(x2, m2*x2 + b2, '-')
pylab.show()
m3, b3 = np.polyfit(x2, x1, 1)
pylab.plot(x2, x1, '.', color = 'orange')
pylab.plot(x2, m3*x2 + b3, '-')
pylab.show()
Exemple #56
0
        else:
            momentum = final_momentum
        gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * (
            (dY > 0) == (iY > 0))
        gains[gains < min_gain] = min_gain
        iY = momentum * iY - eta * (gains * dY)
        Y = Y + iY
        Y = Y - Math.tile(Math.mean(Y, 0), (n, 1))

        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = Math.sum(P * Math.log(P / Q))
            print "Iteration ", (iter + 1), ": error is ", C

        # Stop lying about P-values
        if iter == 100:
            P = P / 4

    # Return solution
    return Y


if __name__ == "__main__":
    print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
    print "Running example on 2,500 MNIST digits..."
    X = Math.loadtxt("mnist2500_X.txt")
    labels = Math.loadtxt("mnist2500_labels.txt")
    Y = tsne(X, 2, 50, 20.0)
    Plot.scatter(Y[:, 0], Y[:, 1], 20, labels)
    Plot.show()
Exemple #57
0
    #      -0.27342691, -0.40625428, -0.57759724, -0.36327529, -0.68233917, -0.54106024,
    #      -0.594002, -0.54278992, -0.72141841, -0.84960626, -0.99065906, -0.95052521,
    #      -0.77855943, -1.1411366, -0.82951384, -0.98987168, -0.73749955, -1.03039024,
    #      -0.87363562, -0.96550525, -0.78238156, -1.02137395, -0.89266227, -0.9028713,
    #      -0.7883536, -0.72666741, -1.04589251, -0.73791169, -0.71817151, -0.64454691,
    #      -0.50123652, -0.73558315, -0.3898374, -0.40887712, -0.25264427, -0.02445753,
    #      -0.44606617, -0.16614301]
    # y = np.array(y)

    # # x = [[1], [2], [3], [4], [5]]
    # x = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12]]
    # x = np.array(x)
    # # y = [1, 2, 4, 3, 2]
    # y = [1, 2, 4.3, 3, 2, 2, 1.5, 1.3, 1.5, 1.7, 1.8, 2]
    # y = np.array(y)

    print("Nadaray Watson's yest:")
    yest_nadaray = nadaray_watson(x, y)
    print(yest_nadaray)
    print("Lowess yest:")
    yest_lowess = lowess(x, y)
    print(yest_lowess)

    pl.clf()
    pl.scatter(x, y, label='data', color="black")
    pl.plot(x, yest_nadaray, label='y Nadaraya-Watson', color="red")
    pl.plot(x, yest_lowess, label='y lowess', color="blue")
    pl.title('Nadaraya-Watson vs Lowess')
    pl.legend()
    pl.show()
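
nadaray_watson and lowess are referenced but not defined in this excerpt. A minimal Nadaraya-Watson estimator with a Gaussian kernel, evaluated at the sample points (the bandwidth h is an illustrative assumption), looks like:

import numpy as np

def nadaraya_watson(x, y, h=1.0):
    # yest_i = sum_j K_h(x_i - x_j) * y_j / sum_j K_h(x_i - x_j)
    x = np.asarray(x, dtype=float).ravel()
    y = np.asarray(y, dtype=float).ravel()
    w = np.exp(-0.5 * ((x[:, None] - x[None, :]) / h) ** 2)  # Gaussian weights
    return w.dot(y) / w.sum(axis=1)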
Exemple #58
0
def tharfwhm(image,lines=lines, doplot=False, silent=False, pltname='fwhm.png'):
    
    imdat = pyfits.getdata(image).transpose()

    if not silent: print( "Pixel xy guess  FWHM    peak ADU")

    allfwhm = []
    allpeaks = []
    x = []
    y = []
    for i in range(lines.shape[0]):
        lines[i] = np.round(lines[i])
        x0, y0 = int(lines[i, 0]), int(lines[i, 1])  # slice indices must be integers
        imbox = imdat[x0 - win[0] // 2:x0 + win[0] // 2,
                      y0 - 1 - win[1] // 2:y0 - 1 + win[1] // 2]

        if np.max(imbox) < 500:
            if not silent: print( "Line flux too small!")
            continue
        if np.max(imbox) > 40000:
            if not silent: print( "Line too close to saturation!")
            continue
        
        bins, edges = np.histogram(imbox, bins=50)
        mode = edges[np.argmax(bins)]
        imflat = np.sum(imbox-mode,axis=0)
        
        xfit = np.arange(-win[0]/2, win[0]/2, 1)
        p0 = [np.max(imflat)-np.min(imflat), 0.0, 2.5, np.min(imflat)]
        #pfin,covar = curve_fit(gauss, xfit, imflat, p0=p0)
        pfin = leastsq(errfunc, p0, args=(xfit, imflat), maxfev=10000)[0]
        yfit = gauss(xfit, pfin)

        fwhm = 2*np.sqrt(2*np.log(2))*pfin[2]
        if fwhm < 0:
            if not silent: print( "Negative FWHM, bad fit!")
            continue
        
        allfwhm.append(fwhm)
        x.append(lines[i,0])
        y.append(lines[i,1])
        allpeaks.append(np.max(imbox))
        
        if False and doplot:
            import pylab as pl
            pl.ion()
            pl.clf()
            xplot = np.linspace(min(xfit), max(xfit), 200)
            yfit = gauss(xplot, pfin)
            pl.plot(xfit, imflat,'k-o', markersize=8)
            pl.plot(xplot, yfit, 'b-')
            pl.ylabel('ADU')
            pl.xlabel('$\\Delta$pixel')
            pl.draw()
            time.sleep(0.2)
        
        if not silent: print( "%s\t%6.4f\t%d" % (lines[i], fwhm, np.max(imbox)))

    if not silent: print( "-------------------------------\n")

    if doplot:
        import pylab as pl
        pl.ioff()
        from scipy.interpolate import griddata
        x = np.array(x)
        y = np.array(y)
        z = np.array(allfwhm)
        xi = np.linspace(min(x), max(x), 1000)
        yi = np.linspace(min(y), max(y), 1000)
        zi = griddata((x,y),z, (xi[None,:],yi[:,None]), method='nearest')

        pl.imshow(np.log10(imdat), vmin=np.log10(1000), vmax=np.log10(1500), aspect=7)
        #pl.contourf(yi,xi,zi, 50)
        pl.scatter(y,x,c=z,s=100, vmin=2, vmax=5)
        pl.xlim(min(y), max(y))
        pl.ylim(min(x), max(x))
        pl.colorbar()
        pl.title("median FWHM = %4.2f" % np.median(allfwhm))
        pl.xlabel('row [px]')
        pl.ylabel('col [px]')
        pl.savefig(pltname)
        #pl.show()
        pl.clf()
    
    return np.median(allfwhm), np.max(allpeaks)
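
gauss, errfunc and win come from the enclosing module and are not shown; a sketch consistent with the calls above (p = [amplitude, centre, sigma, offset], FWHM = 2*sqrt(2*ln 2)*sigma, and an assumed 20-pixel box) would be:

import numpy as np

win = (20, 20)  # extraction box size in pixels (illustrative)

def gauss(x, p):
    # p = [amplitude, centre, sigma, offset]
    return p[0] * np.exp(-0.5 * ((x - p[1]) / p[2]) ** 2) + p[3]

def errfunc(p, x, y):
    # residuals for scipy.optimize.leastsq
    return gauss(x, p) - y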
Exemple #59
0
    fs.append(f)
    if i % viz_every == 0:
        np_samples.append(
            np.vstack([sess.run(samples) for _ in range(n_batches_viz)]))
        xx, yy = sess.run([samples, data])

import seaborn as sns

data_t = sample_mog(params['batch_size'])
data = sess.run(data_t)
fig = pylab.gcf()
fig.set_size_inches(16.0, 16.0)
pylab.clf()
pylab.scatter(data[:, 0],
              data[:, 1],
              s=20,
              marker="o",
              edgecolors="none",
              color='blue')
pylab.xlim(-4, 4)
pylab.ylim(-4, 4)
pylab.savefig("gt_scatter.png")

color = "Greens"
fig = pylab.gcf()
fig.set_size_inches(16.0, 16.0)
pylab.clf()
bg_color = sns.color_palette(color, n_colors=256)[0]
ax = sns.kdeplot(data[:, 0],
                 data[:, 1],
                 shade=True,
                 cmap=color,
X2 = list(np.linspace(0.65, 0.85, 5))
X1.extend(X2)
X = X1
X = np.array(X)
Y = np.sin(2 * np.pi * X)
N = X.shape[0]

alpha = []
for i in range(N):
    alpha_ = 0.01
    alpha.append(alpha_)
alpha = np.array(alpha)

X_plot = X
Y_plot = Y + np.random.normal(0, alpha)
pylab.scatter(X_plot, Y_plot)

kernel = C(1.0, (0.01, 100)) \
    * ManifoldKernel.construct(base_kernel=RBF(length_scale=10), architecture=((1, 6, 2),),
                               transfer_fct="tanh", max_nn_weight=1)
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=alpha**2,
                              n_restarts_optimizer=1)
'''
kernel = C(1.0) * RBF(length_scale=0.1)
gp = GaussianProcessRegressor(kernel=kernel, alpha=alpha ** 2, n_restarts_optimizer=10)
'''

gp.fit(X[:, None], Y)

XX = np.linspace(-1.5, 1.5, 100)