Example #1
 def fit(self, b, new_data, a, basis_num=5):
     """Initial Bayesian linear-regression posterior from one (x, t) pair.

     b is the prior precision and a the noise precision. Despite its name,
     self.noise_var stores the noise precision a, which is how every formula
     here and in update() consumes it. Assumes numpy as np and the project's
     matmul / matrix_transpose / inv helpers are in scope.
     """
     self.basis_num = basis_num
     self.noise_var = a  # noise precision; must be set before first use
     # Polynomial design row vector phi(x) = [1, x, x^2, ...].
     self.phi = np.zeros((1, self.basis_num))
     for i in range(self.basis_num):
         self.phi[0, i] = new_data[0]**i
     # Posterior precision S = a * phi^T phi + b * I.
     self.precision = self.noise_var * matmul(matrix_transpose(
         self.phi), self.phi) + b * np.identity(self.basis_num)
     # Posterior mean m = a * S^{-1} phi^T t.
     self.mean = self.noise_var * matmul(inv(
         self.precision), matrix_transpose(self.phi)) * new_data[1]
     self.data_count += 1
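For reference, a self-contained NumPy sketch of the same computation (the function name is illustrative, not from the source; matmul, matrix_transpose and inv map onto np.matmul, np.transpose and np.linalg.inv):

import numpy as np

def initial_posterior(x, t, a, b, basis_num=5):
    # phi(x) = [1, x, ..., x^(M-1)] as a (1, M) design row.
    phi = np.array([[x ** i for i in range(basis_num)]])
    # S = a * phi^T phi + b * I,  m = a * S^{-1} phi^T t.
    precision = a * phi.T @ phi + b * np.identity(basis_num)
    mean = a * np.linalg.inv(precision) @ phi.T * t
    return precision, mean

S0, m0 = initial_posterior(x=0.5, t=0.8, a=2.0, b=1.0)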
Example #2
import numpy as np
import tensorflow as tf

import ops


def fc_layer(input_tensor, out_channels):
    """
    Fully connected layer with Xavier-style init and L1 weight regularization
    :param input_tensor: 2D tensor of shape [batch, in_channels]
    :param out_channels: int, number of output units
    :return: 2D tensor of shape [batch, out_channels]
    """
    weights_shape = [input_tensor.get_shape().as_list()[-1], out_channels]

    # Xavier/Glorot-style init: stddev = sqrt(2 / (fan_in + fan_out)).
    weights_init = tf.truncated_normal(
        weights_shape,
        stddev=np.sqrt(2 / (weights_shape[0] + weights_shape[1])))
    weights = tf.Variable(initial_value=weights_init,
                          dtype=tf.float32,
                          name="weights")
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                         tf.reduce_sum(tf.abs(weights)))

    mul_tensor = ops.matmul(input_tensor, weights)

    bias = tf.Variable(initial_value=tf.zeros((weights_shape[1]),
                                              dtype=tf.float32),
                       name="bias")
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                         tf.reduce_sum(tf.abs(bias)))
    add_tensor = mul_tensor + bias

    return add_tensor
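A quick TF1 graph-mode usage sketch (the placeholder shape, layer width, and 1e-4 regularization weight are arbitrary illustrations, not from the source): build one layer, then fold the L1 terms registered above into a total loss.

x = tf.placeholder(tf.float32, shape=[None, 64], name="x")
logits = fc_layer(x, out_channels=10)

# Collect the L1 penalties that fc_layer registered.
l1_terms = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_loss = 1e-4 * tf.add_n(l1_terms)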
Example #3
 def update(self, new_data):
     """Sequential Bayesian update with one additional (x, t) pair.

     Standard recursive posterior for Bayesian linear regression;
     self.noise_var holds the noise precision a (see fit()).
     """
     # Design row vector phi(x) for the new point.
     for i in range(self.basis_num):
         self.phi[0, i] = new_data[0]**i
     S = self.precision  # previous posterior precision
     # Updated precision: S_new = a * phi^T phi + S_old.
     self.precision = self.noise_var * matmul(matrix_transpose(self.phi),
                                              self.phi) + S
     # Updated mean: m_new = S_new^{-1} (a * phi^T t + S_old m_old).
     self.mean = matmul(
         inv(self.precision),
         (self.noise_var * matrix_transpose(self.phi) * new_data[1] +
          matmul(S, self.mean)))
     # Predictive variance 1/a + phi S_new^{-1} phi^T and mean m^T phi^T.
     self.p_var = (1 / self.noise_var) + matmul(
         matmul(self.phi, inv(self.precision)), matrix_transpose(self.phi))
     self.p_mean = matmul(matrix_transpose(self.mean),
                          matrix_transpose(self.phi))
     self.data_count += 1
     return self.precision, self.mean, self.p_var, self.p_mean
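And the matching standalone sketch of one sequential step (again NumPy-only and illustrative), showing how the previous posterior (S, m) is folded into the new one:

import numpy as np

def sequential_update(S, m, x, t, a, basis_num=5):
    phi = np.array([[x ** i for i in range(basis_num)]])
    S_new = a * phi.T @ phi + S                              # updated precision
    m_new = np.linalg.inv(S_new) @ (a * phi.T * t + S @ m)   # updated mean
    p_var = 1 / a + phi @ np.linalg.inv(S_new) @ phi.T       # predictive variance
    p_mean = m_new.T @ phi.T                                 # predictive mean
    return S_new, m_new, p_var, p_mean

# e.g. S1, m1, var1, mu1 = sequential_update(S0, m0, x=0.1, t=0.2, a=2.0)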
Example #4
import numpy as np
import pandas as pd
import tensorflow as tf

import ops

tf.set_random_seed(0)
np.random.seed(0)
np.set_printoptions(precision=5, linewidth=120, suppress=True)

# 10x10 Hilbert matrix: badly conditioned, so plain gradient descent on the
# least-squares loss converges slowly.
mat_val = np.array([[1 / (i + j + 1) for i in range(10)] for j in range(10)])
rhs_val = 0.05 * np.ones([10, 1])

mat = tf.constant(value=mat_val, dtype=tf.float32)
rhs = tf.constant(value=rhs_val, dtype=tf.float32)
x = tf.Variable(initial_value=tf.zeros_like(rhs), dtype=tf.float32)

loss = tf.reduce_sum(tf.square(ops.matmul(mat, x) - rhs))
train_op = tf.train.GradientDescentOptimizer(1e-2).minimize(loss)

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    df = pd.DataFrame(columns=["step", "loss"])
    for i in range(100):
        if i % 5 == 0:
            loss_val = sess.run(loss)
            df.loc[i] = [i, loss_val]  # store numeric values, not strings
            print("step:{}\tloss:{}".format(i, loss_val))
        sess.run(train_op)

    df.to_csv("./logs/csv/math_so.csv")
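As a sanity check on the same Hilbert system, the exact least-squares solution can be computed directly with NumPy and compared against the iterate above (a sketch reusing mat_val and rhs_val):

# Direct solve for comparison; lstsq handles the near-singular system.
x_exact, *_ = np.linalg.lstsq(mat_val, rhs_val, rcond=None)
print("direct least-squares solution:\n", x_exact)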
Example #5
import sys

import numpy as np
from numpy import exp, linalg

# MulGaussianGen and the matmul / matrix_transpose / matrix_inverse helpers
# are assumed to come from the surrounding project (thin wrappers over
# np.random.multivariate_normal, np.matmul, np.transpose and np.linalg.inv).
# Two 2-D Gaussian clusters: class 0 (D1) and class 1 (D2).
D1x, D1y = MulGaussianGen(0.2, 0.2, 0.3, 0.1)
D2x, D2y = MulGaussianGen(1, 0.03, 0.7, 0.2)
x = np.reshape(np.asarray(D1x + D2x), (-1, 1))
y = np.reshape(np.asarray(D1y + D2y), (-1, 1))
data = np.concatenate((np.ones((x.shape[0], 1)), x, y), axis=1)
feature_dim = 2
weights = np.ones((feature_dim + 1, 1))
w_update = np.zeros((feature_dim + 1, 1))
predict = np.zeros((data.shape[0], 1)) + 0.5
real_class = np.concatenate((np.zeros((len(D1x), 1)), np.ones((len(D2x), 1))),
                            axis=0)

D = np.zeros((data.shape[0], data.shape[0]))

for opt_itr in range(5000):
    # Logistic predictions and gradient of the cross-entropy loss.
    predict = 1 / (1 + np.exp((-1) * matmul(data, weights)))
    gradient = matmul(matrix_transpose(data), (predict - real_class))
    # Diagonal weight matrix D and Hessian H = X^T D X of the loss.
    for i in range(data.shape[0]):
        z = matmul(data[i, :][np.newaxis], weights)[0, 0]
        D[i, i] = exp(-z) / ((1 + exp(-z)) ** 2)
    H = matmul(matmul(matrix_transpose(data), D), data)
    # Take a Newton step only if the Hessian is numerically invertible.
    if linalg.cond(H) < 1 / sys.float_info.epsilon:
        H_inverse = matrix_inverse(H)
        w_update = matmul(H_inverse, gradient)
    else:
        # (Near-)singular Hessian: fall back to a plain gradient step.
        w_update = gradient
    weights = weights - w_update
    if np.amax(np.abs(gradient)) < 0.3:
        print("Iteration:{}".format(opt_itr))
        break  # stop once the gradient is small enough
Example #6

import batchFuncs
import ops
import torch

batch_size, D_in, D_hid, D_out = 2, 3, 5, 1
W1 = torch.randn(D_in, D_hid, requires_grad=True)
W2 = torch.randn(D_hid, D_out, requires_grad=True)
X = torch.randn(batch_size, D_in, requires_grad=True)


class ProtectAggregate(torch.autograd.Function):
    """Identity function whose backward pass logs the incoming gradient shape."""
    @staticmethod
    def forward(ctx, X):
        ctx.save_for_backward(X)
        return X

    @staticmethod
    def backward(ctx, batched_grad):
        X, = ctx.saved_tensors  # saved_variables is long deprecated
        print("protect", batched_grad.shape)
        return batched_grad


p = ProtectAggregate.apply

output = torch.sum(ops.matmul(ops.matmul(p(X), p(W1)), p(W2)), dim=0)

jacobian = torch.autograd.grad(output, [W1, W2], retain_graph=True)

print(jacobian)
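Since ProtectAggregate is the identity, the gradients it passes through should match those computed without the wrapper; a quick check under the same setup:

plain_output = torch.sum(ops.matmul(ops.matmul(X, W1), W2), dim=0)
plain_jacobian = torch.autograd.grad(plain_output, [W1, W2])
print(all(torch.allclose(g1, g2) for g1, g2 in zip(jacobian, plain_jacobian)))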