def __init__(self, x, dw, h):
    # x: simulated paths, shape (N, L+1); dw: Brownian increments; h: time step
    N = x.shape[0]      # number of sample paths
    L = x.shape[1] - 1  # number of time steps
    # Per-step control-variate increments: mean-field b on the paths times dW
    B = mckean.bMat(x, dw)
    self.b = tf.constant(B, dtype=tf.float32)
    # One small fully connected network per time step
    self.models = []
    self.trainable_weights = []
    for j in range(L):
        m = tf.keras.Sequential([
            tf.keras.layers.Input(1),
            tf.keras.layers.Dense(100, activation=tf.nn.relu),  # input shape required
            tf.keras.layers.Dense(100, activation=tf.nn.relu),
            tf.keras.layers.Dense(1)
        ])
        self.trainable_weights.extend(m.trainable_weights)
        self.models.append(m)
    # Independent copy of the particle system for the interaction term in f
    u, udW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    self.f = tf.constant(np.mean(
        mckean.f(
            np.tile(x[:, -1], (N, 1)).transpose(),
            np.tile(u[:, -1], (N, 1))), 1),
        dtype=tf.float32)
    self.loss_fn = VarianceError()
    self.pred_y = tf.zeros((N))
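# VarianceError is referenced above but not defined in this section. Going by the
# formula kept as a comment in the training script further down
# (tf.math.reduce_variance(tf.keras.backend.sum(x * self.b, axis=1) - self.f)),
# a minimal sketch of such a loss could look as follows. The zero-argument
# constructor matches the usage above; the __call__ signature is an assumption,
# not the repository's confirmed interface.
import tensorflow as tf

class VarianceError:
    """Sketch: variance of the control-variate-corrected estimator."""

    def __call__(self, residual):
        # residual[i] = sum_l g_l(X_i^l) * b_i^l - f_i, one value per sample path;
        # minimising its variance is what makes the sum a useful control variate.
        return tf.math.reduce_variance(residual)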
def __init__(self, x, dw, h):
    # Matlab reference for the construction of B:
    # bv = mean(b(X(:,l)*ones(1,N),ones(N,1)*X(:,l)'),2);
    # B(:,((l-1)*K+1):(l*K))=A.*(bv.*dW(:,l+1)*ones(1,K));
    self.K = 10         # number of polynomial basis functions
    N = x.shape[0]      # number of sample paths
    L = x.shape[1] - 1  # number of time steps
    # B[i, l] = mean_j b(X_i^l, X_j^l) * dW_i^{l+1}
    B = np.zeros((N, L))
    for l in range(L):
        xm = np.tile(x[:, l], (N, 1))
        B[:, l] = np.mean(mckean.b(xm.transpose(), xm), 1) * dw[:, l + 1]
    self.b = tf.constant(B, dtype=tf.float32)
    # Independent copy of the particle system for the interaction term in f
    u, udW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    self.f = tf.constant(np.mean(
        mckean.f(
            np.tile(x[:, -1], (N, 1)).transpose(),
            np.tile(u[:, -1], (N, 1))), 1),
        dtype=tf.float32)
    # Trainable coefficients of the polynomial control variate
    self.alpha = tf.Variable(tf.zeros([self.K]))
    # Polynomial basis evaluated on every path and time step, shape (N, L, K)
    base = np.zeros((N, L, self.K))
    for l in range(L):
        base[:, l, :] = mckean.genPoly(x[:, l], self.K)  # * np.tile(self.b[:, 5], (self.K, 1)).transpose()
    self.tbase = tf.constant(base, dtype=tf.float32)
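# How model(X) is evaluated is not shown in this section. Since alpha has shape
# (K,), the basis has shape (N, L, K), and the scripts below multiply model(X)
# elementwise with an (N, L) matrix of increments, one plausible forward pass
# (an assumption, not the repository's confirmed implementation) contracts the
# polynomial basis with alpha, rebuilding the basis on whatever paths are passed in:
import numpy as np
import tensorflow as tf
import mckean

def call_sketch(model, x):
    # Returns one control-variate coefficient per path and time step, shape (N, L).
    N, L = x.shape[0], x.shape[1] - 1
    base = np.zeros((N, L, model.K))
    for l in range(L):
        base[:, l, :] = mckean.genPoly(x[:, l], model.K)
    tbase = tf.constant(base, dtype=tf.float32)
    return tf.tensordot(tbase, model.alpha, axes=[[2], [0]])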
import mckean
import numpy as np

h = 0.02
L = int(1 / h)
N = 1000
M = 100

# Matlab result: mean=0.4084 std=0.0087
result_mc = np.zeros(M)
for j in range(M):
    X, deltaW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    result_mc[j] = np.mean(mckean.f(X[:, -1], X[:, -1]))
print(np.mean(result_mc))
print(np.std(result_mc))
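# The neural-network scripts in this section call mckean.bMat(X, deltaW) without
# showing its body. Judging from the inline construction of B in the polynomial
# model above, it presumably builds the (N, L) matrix of mean-field diffusion
# values times Brownian increments; a sketch under that assumption:
import numpy as np
import mckean

def bMat_sketch(x, dw):
    # B[i, l] = mean_j b(x[i, l], x[j, l]) * dw[i, l + 1]
    N, L = x.shape[0], x.shape[1] - 1
    B = np.zeros((N, L))
    for l in range(L):
        xm = np.tile(x[:, l], (N, 1))
        B[:, l] = np.mean(mckean.b(xm.transpose(), xm), 1) * dw[:, l + 1]
    return B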
# Loss being minimised (kept here for reference):
#   tf.math.reduce_variance(tf.keras.backend.sum(x * self.b, axis=1) - self.f)
epochs = range(100)
for epoch in epochs:
    for i in range(30):
        current_loss = model.trainstep(optimizer, tX)
    print('Epoch %2d: loss=%2.8f' % (epoch, current_loss))

# Compare plain Monte Carlo against the control-variate estimator on fresh samples
result_mc = np.zeros(M)
result_mc_cv = np.zeros(M)
result_cv = np.zeros(M)
for j in range(M):
    X, deltaW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    tX = tf.constant(X[:, 1:], dtype=tf.float32)
    # Control variate: per-step network outputs times the martingale increments
    cvF = np.sum(model(tX).numpy() * mckean.bMat(X, deltaW), axis=1)
    result_cv[j] = np.mean(cvF)
    # Independent copy of the particle system for the interaction term in f
    u, udW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    fT = np.mean(
        mckean.f(
            np.tile(X[:, -1], (N, 1)).transpose(),
            np.tile(u[:, -1], (N, 1))), 1)
    result_mc[j] = np.mean(fT)
    result_mc_cv[j] = np.mean(fT - cvF)

np.savez("data/nncv_f2.npz", result_mc, result_mc_cv)
print('MC: mean=%2.6f std=%2.6f' % (np.mean(result_mc), np.std(result_mc)))
print('MC-CV: mean=%2.6f std=%2.6f' %
      (np.mean(result_mc_cv), np.std(result_mc_cv)))
plot.boxplot([result_mc, result_mc_cv])
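# model.trainstep(optimizer, tX) above is not defined in this section. A minimal
# sketch of one gradient step, assuming model(tX) returns per-step coefficients of
# shape (N, L) and using the variance loss quoted in the comment at the top of this
# script (an assumption, not the repository's confirmed implementation):
import tensorflow as tf

def trainstep_sketch(model, optimizer, tx):
    with tf.GradientTape() as tape:
        pred = model(tx)  # per-step coefficients, shape (N, L)
        residual = tf.reduce_sum(pred * model.b, axis=1) - model.f
        loss = tf.math.reduce_variance(residual)
    grads = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    return loss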
learning_rate = 0.001
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
current_loss = model.loss(model(X))
print('Initial Loss: loss=%2.5f' % current_loss)

epochs = range(100)
for epoch in epochs:
    # Gradient step on alpha only (the polynomial coefficients)
    with tf.GradientTape() as t:
        t.watch(model.alpha)
        current_loss = model.loss(model(X))
    dAlpha = t.gradient(current_loss, [model.alpha])
    optimizer.apply_gradients(zip(dAlpha, [model.alpha]))
    print('Epoch %2d: loss=%2.5f' % (epoch, current_loss))
print(model.alpha)

# Compare plain Monte Carlo against the control-variate estimator on fresh samples
result_mc = np.zeros(M)
result_mc_cv = np.zeros(M)
result_mc_cv2 = np.zeros(M)
for j in range(M):
    X, deltaW = mckean.genproc_1dim_ex(L, h, N, mckean.a, mckean.b)
    cvF = np.sum(model(X).numpy() * mckean.bMat(X, deltaW), axis=1)
    result_mc[j] = np.mean(mckean.f(X[:, -1], X[:, -1]))
    result_mc_cv[j] = np.mean(mckean.f(X[:, -1], X[:, -1]) - cvF)
    result_mc_cv2[j] = np.mean(cvF - mckean.f(X[:, -1], X[:, -1]))
print('MC: mean=%2.6f std=%2.6f' % (np.mean(result_mc), np.std(result_mc)))
print('MC-CV: mean=%2.6f std=%2.6f' %
      (np.mean(result_mc_cv), np.std(result_mc_cv)))