# Example no. 1
def plotcostone_opt(vect=1):
    """Plot the cost-function gradient test for decreasing values of alpha.

    Runs ``test_cost_opt`` for alpha = 10**-1 .. 10**-13 and plots the
    resulting test values on a log-x axis; for a correct gradient the
    curve should converge towards 1 as alpha shrinks.

    :param vect: perturbation-direction selector forwarded to
        ``test_cost_opt`` (semantics defined there — TODO confirm).
    :return: tuple ``(ax, fig)`` for further customisation by the caller.
    """
    sns.set_context('poster',
                    font_scale=1.5,
                    rc={
                        'lines.linewidth': 1,
                        'lines.markersize': 6
                    })
    fig, ax = plt.subplots(nrows=1, ncols=1)
    sns.set_style('ticks')
    # alpha values 1e-1 .. 1e-13, one per decade.
    power = np.arange(1, 14, 1)
    xlist = [10**(-x) for x in power]
    d = dC.DalecData(
        1999,
        2000,
        'nee_day',
        nc_file='../../alice_holt_data/ah_data_daily_test_nee3.nc',
        scale_nee=1)
    m = mc.DalecModel(d)
    tstlist = [test_cost_opt(m, x, vect) for x in xlist]
    ax.semilogx(xlist, tstlist, 'k', marker='x', mew=1, ms=8)
    plt.xlabel(r'$\alpha$')
    plt.ylabel(r'$f(\alpha)$')
    # print() call replaces the original Python 2 print statement, which
    # is a SyntaxError under Python 3.
    print(tstlist)
    return ax, fig
# Example no. 2
def plotcostone_ens(vect=1, sizee=20):
    """Plot the ensemble cost-function gradient test for decreasing alpha.

    Runs ``test_cost_ens`` (absolute value) for alpha = 10**-1 .. 10**-13
    on a twin-experiment data set and plots the results on a log-x axis;
    convergence towards 1 indicates a consistent gradient.

    :param vect: perturbation-direction selector forwarded to
        ``test_cost_ens`` (semantics defined there — TODO confirm).
    :param sizee: ensemble size passed to ``DalecModel``.
    :return: tuple ``(ax, fig)`` for further customisation by the caller.
    """
    sns.set_context('poster',
                    font_scale=1.5,
                    rc={
                        'lines.linewidth': 1,
                        'lines.markersize': 6
                    })
    fig, ax = plt.subplots(nrows=1, ncols=1)
    sns.set_style('ticks')
    # alpha values 1e-1 .. 1e-13, one per decade.
    power = np.arange(1, 14, 1)
    xlist = [10**(-x) for x in power]
    # Optimised background values for the parameters selected by
    # d.params4opt (presumably; confirm against DalecDataTwin).
    xb_opt = np.array([60.3, 149.17, 276.1])
    d = dC.DalecDataTwin(1999, 2000, 'nee_day', err_scale=0.25)
    d.xb = cp.copy(d.x_truth)
    d.xb[d.params4opt] = xb_opt
    d.opt_xb = xb_opt
    m = mc.DalecModel(d, size_ens=sizee)
    m.run_ensemble(xb_opt)
    tstlist = [abs(test_cost_ens(m, x, vect)) for x in xlist]
    ax.semilogx(xlist, tstlist, 'k', marker='x', mew=1, ms=8)
    plt.xlabel(r'$\alpha$')
    plt.ylabel(r'$f(\alpha)$')
    # print() call replaces the original Python 2 print statement, which
    # is a SyntaxError under Python 3.
    print(tstlist)
    return ax, fig
# Example no. 3
def test_cost_cvt(alph=1e-8, vect=0):
    """Gradient test for ``cost_cvt`` / ``gradcost_cvt`` in z-space.

    Computes the standard finite-difference gradient test
    ``|J(z + alpha*h) - J(z)| / (alpha * h . gradJ)``, which should tend
    to 1 as ``alph`` -> 0 when the gradient is coded correctly.

    :param alph: finite-difference step size alpha.
    :param vect: perturbation direction: 0 -> unit vector along z,
        1 -> unit vector along the gradient, 2 -> uniform unit vector of
        length 23 (presumably the model's parameter count — TODO confirm).
    :raises ValueError: if ``vect`` is not 0, 1 or 2 (the original code
        raised an obscure UnboundLocalError in that case).
    :return: the gradient-test ratio.
    """
    d = dC.DalecDataTwin(
        1999,
        2000,
        'nee',
        nc_file='../../alice_holt_data/ah_data_daily_test_nee3.nc',
        scale_nee=1)
    m = mc.DalecModel(d, size_ens=1)
    pvals = d.edinburgh_mean
    zvals = m.pvals2zvals(pvals)
    gradj = m.gradcost_cvt(zvals)
    print(gradj.shape)
    if vect == 0:
        h = zvals / np.linalg.norm(zvals)
    elif vect == 1:
        h = gradj / np.linalg.norm(gradj)
    elif vect == 2:
        h = np.ones(23) / np.sqrt(23)
    else:
        raise ValueError('vect must be 0, 1 or 2, got %r' % (vect,))
    j = m.cost_cvt(zvals)
    jalph = m.cost_cvt(zvals + alph * h)
    # Diagnostics: numerator, denominator and signed ratio of the test.
    print(jalph - j)
    print(np.dot(alph * h, gradj))
    print((jalph - j) / np.dot(alph * h, gradj))
    return abs(jalph - j) / np.dot(alph * h, gradj)
# Example no. 4
def test_costfn(alph=1e-9):
    """Gradient test for ``cost`` / ``gradcost``.

    Perturbs the parameter vector along the normalised gradient and
    asserts the finite-difference ratio
    ``(J(p + alpha*h) - J(p)) / (alpha * h . gradJ)`` is below 1.0001.

    NOTE(review): the one-sided ``< 1.0001`` check also passes for
    ratios far below 1 (even negative); a two-sided tolerance around 1
    would be stricter — kept as-is to preserve behaviour.

    :param alph: finite-difference step size alpha.
    :raises AssertionError: if the gradient-test ratio exceeds 1.0001.
    """
    d = dC.DalecData(50, 'nee')
    m = mc.DalecModel(d)
    gradj = m.gradcost(d.pvals)
    h = gradj / np.linalg.norm(gradj)
    j = m.cost(d.pvals)
    jalph = m.cost(d.pvals + alph * h)
    # Compute once, then print: the original Python 2 statement
    # `print (a) / (b)` would, under Python 3, print `a` and then
    # attempt `None / b` — a TypeError.
    ratio = (jalph - j) / np.dot(alph * h, gradj)
    print(ratio)
    assert ratio < 1.0001
# Example no. 5
def test_linmod(gamma=1e1):
    """Convergence test for the tangent linear model (TLM).

    Evolves a perturbed parameter vector with the nonlinear model and
    compares against the linear propagation of the perturbation through
    the TLM matrices over 730 steps.

    :param gamma: scale of the relative perturbation (0.3 * gamma).
    :return: ratio of the linear-increment norm to the residual norm
        (larger values indicate better TLM agreement).
    """
    data = dC.DalecData(731, 'nee')
    model = mc.DalecModel(data)
    base = data.pvals

    # Linearisation point: nonlinear trajectory plus TLM matrices.
    cx, matlist = model.linmod_list(base)
    # Nonlinear model evolved from the perturbed parameters.
    cxdx = model.mod_list(base * (1 + 0.3 * gamma))[-1]
    # The perturbation itself, propagated linearly through the TLM.
    delta = base * (0.3 * gamma)
    dxl = np.linalg.norm(np.dot(model.mfac(matlist, 730), delta.T))

    dxn = np.linalg.norm(cxdx - cx - dxl)
    return dxl / dxn
# Example no. 6
def test_cost(alph=1e-8, vect=0):
    """Gradient test for ``cost`` / ``gradcost2``.

    Computes the finite-difference ratio
    ``(J(p + alpha*h) - J(p)) / (alpha * h . gradJ)``, which should tend
    to 1 as ``alph`` -> 0 when the gradient is coded correctly.

    :param alph: finite-difference step size alpha.
    :param vect: 1 -> perturb along the (normalised) parameter vector;
        any other value -> perturb along the normalised gradient.
    :return: the signed gradient-test ratio.
    """
    d = dC.DalecData(365, 'nee')
    m = mc.DalecModel(d)
    pvals = d.edinburghmean
    gradj = m.gradcost2(pvals)
    # Original tested `vect == True`; since True == 1 in Python this is
    # exactly equivalent to the explicit comparison below.
    if vect == 1:
        h = pvals / np.linalg.norm(pvals)
    else:
        h = gradj / np.linalg.norm(gradj)
    j = m.cost(pvals)
    jalph = m.cost(pvals + alph * h)
    # print() calls replace the original Python 2 print statements,
    # which are a SyntaxError under Python 3.
    print(jalph - j)
    print(np.dot(alph * h, gradj))
    return (jalph - j) / np.dot(alph * h, gradj)