Example #1
0
                    print('coverged')
                    convergence = True

            if i % 5000 == 0 or (convergence):
                print('[{:4d}] loss : {:.4f} '.format(i, error))

            if convergence:
                break


# learning rate = 0.1
if __name__ == "__main__":
    # Train and report on one non-linearly separable problem (XOR) and
    # one linearly separable problem.
    data_type = ['XOR', 'Linear']
    for dataset in data_type:

        X_train, y_train = GenData.fetch_data(dataset, 60)
        X_test, y_test = GenData.fetch_data(dataset, 20)

        # 2 inputs -> two hidden layers of 4 units each -> 1 output.
        net = SimpleNet([2, 4, 4, 1], learning_rate=0.1)
        net.train(X_train, y_train, threshold=0.0001)

        print('=' * 15, dataset, '=' * 15)

        # Training (data leakage)
        y_pred = net.forward(X_train)
        print('train loss : ', loss(y_pred, y_train))
        correct = np.count_nonzero(np.round(y_pred) == y_train)
        print('train accuracy : {:.2f}%'.format(correct * 100 / len(y_pred)))

        print('-' * 30)
Example #2
0
# Ambient temperature for this run.
T_inf = 25

# Grid of N + 1 nodes on the unit interval (N, eps_0 and h are defined
# earlier in the file, above this snippet).
z = np.linspace(0, 1, N + 1)

# parameters for GenData
M = 32

# parameters for Prior: unit-mean beta with isotropic covariance
# sigma_prior**2 * I on the N - 1 interior nodes.
beta_prior = np.ones((N - 1, ))
sigma_prior = 1
C_beta = np.diag(sigma_prior**2 * np.ones((N - 1, )))

# start: assemble the forward model, the prior, and the data generator.
model = Model(N, eps_0, h, T_inf)
prior = Prior(beta_prior, C_beta)
data = GenData(M, h, T_inf)

# Cholesky factor of the prior covariance; used below to colour
# standard-normal draws into samples from N(beta_prior, C_beta).
R = np.linalg.cholesky(C_beta)

N_samples = int(1e3)

# NOTE(review): beta_samp_lst is declared but never appended to in the
# visible code — confirm whether the beta samples should also be kept.
beta_samp_lst = []
T_samp_lst = []
for i in range(N_samples):
    # Draw beta from the prior and propagate it through the model;
    # only the resulting temperature sample is collected.
    s = np.random.randn(N - 1)

    beta_sample = beta_prior + R.dot(s)
    T_sample, _ = model.get_T_mult(beta_sample)

    T_samp_lst.append(T_sample)
Example #3
0
# Discretisation and physical parameters: grid size, step, regularisation
# constant and ambient temperature.
N = 40
h = 0.5
eps_0 = 5e-4
T_inf = 50

# Grid of N + 1 nodes on the unit interval.
z = np.linspace(0, 1, N+1)
# Number of points used by the synthetic-data generator.
M = 20

# Unit-mean prior on beta with isotropic covariance sigma_prior**2 * I.
beta_prior = np.ones((N-1,))
sigma_prior = 1e-1
C_beta = np.diag(sigma_prior**2*np.ones((N-1,)))


# Assemble model, prior, noisy synthetic data, and the objective function.
model = Model(N, eps_0, h, T_inf)
prior = Prior(beta_prior, C_beta)
data = GenData(M, h, T_inf)
data.gen_data(2, 'vector', scalar_noise=0.02)
objfn = ObjectiveFn(z, data, model, prior)

# Reference beta recovered from the noise-free ("true") temperature field.
data_true = GenData(N, h, T_inf)
T_true, _ = data_true.get_T_true()
beta_test = model.get_beta_true(T_true)

# Objective-function gradient via three formulations; compared against
# finite differences in the loop below.
G_adjoint = objfn.compute_gradient_adjoint(beta_test)
G_direct = objfn.compute_gradient_direct(beta_test)
G_adjoint_cont = objfn.compute_gradient_adjoint_cont(beta_test)


plt.figure()
# Sweep the finite-difference step size over four decades, 1e-1 .. 1e-4.
# (The loop body appears truncated here; presumably G_findiff is plotted
# or compared below — confirm against the full file.)
for epsilon in [-1,-2,-3,-4]:
    G_findiff = objfn.compute_gradient_findiff(beta_test, epsilon=10**epsilon)
Example #4
0
# Discretisation and physical parameters; here T_inf is a vector over the
# N - 1 interior model nodes.
N = 32
h = 0.5
eps_0 = 5e-4
T_inf = 50 * np.ones((N - 1, ))

# Grid of N + 1 nodes; the synthetic data lives on its own grid of size M.
z = np.linspace(0, 1, N + 1)
M = 32
T_inf_data = 50 * np.ones((M - 1, ))

# Unit-mean prior on beta with isotropic covariance sigma_prior**2 * I.
beta_prior = np.ones((N - 1, ))
sigma_prior = 0.8
C_beta = np.diag(sigma_prior**2 * np.ones((N - 1, )))

model = Model(N, eps_0, h, T_inf)
prior = Prior(beta_prior, C_beta)
data = GenData(M, h, T_inf_data)
# NOTE(review): C_m_type is not defined in this snippet — presumably set
# earlier in the file; confirm before running this standalone.
data.gen_data(100, C_m_type=C_m_type, scalar_noise=0.02)

objfn = ObjectiveFn(z, data, model, prior)

# Start the optimiser from the prior mean, using conjugate gradients.
beta0 = np.ones(N - 1)
optimizer = 'CG'

opt = Optimization(model, prior, data, objfn, beta0, optimizer)

# MAP estimate, base-model baseline, posterior samples, and the true
# solution for comparison.
opt.compute_MAP_properties()
opt.compute_base_properties()
opt.sample_posterior(int(1e6))
opt.compute_true_properties()

f = plt.figure(figsize=(14, 4))
Example #5
0
File: fig1.py Project: afvk/FIML
import numpy as np
import matplotlib.pyplot as plt

from Models import Model
from GenData import GenData

# Grid size and numerical parameters.
N = 30

h = 0.5
eps_0 = 5e-4

# Node coordinates on the unit interval.
z = np.linspace(0, 1, N + 1)

plt.figure()
# Compare the "true" and base-model temperature profiles for five
# ambient temperatures between 5 and 50.
for T_inf in np.linspace(5, 50, 5):
    data = GenData(N, h, T_inf)
    mod = Model(N, eps_0, h, T_inf)

    T_true, _ = data.get_T_true()
    T_base, _ = mod.get_T_base()

    # Label only the first curve of each family, so the legend gets one
    # entry per family instead of five.
    first = T_inf == 5
    plt.plot(z[1:N], T_true,
             marker='s', fillstyle='none', linestyle='none', color='k',
             label='True' if first else '')
    plt.plot(z[1:N], T_base, 'r', label='Base' if first else '')

plt.xlabel('z')