Example #1
    def __init__(self, name, DNA_size, global_pop, N_POP_size, Factor=2):
        """
        这个类用来定义种群网络
        :param name: 用来表示所在种群的名称
        :param DNA_size: 用来表示所在种群的大小
        :param global_pop: 用来存储对应的全局网络
        :param N_POP_size: 用来表示每个种群要生成的offset的个数
        :param Factor: 用来表示所选取的offset用于更新网络的个数因子
        """
        with tf.variable_scope(name):
            self.name = name
            self.DNA_size = DNA_size
            self.N_POP_size = N_POP_size
            self.C_POP_size = math.floor(N_POP_size / Factor)
            with tf.variable_scope('mean'):
                # tf.truncated_normal defaults to float32; the initializer
                # dtype must match the float16 variable dtype.
                self.mean = tf.Variable(tf.truncated_normal(
                    [self.DNA_size], mean=0.5, stddev=0.05, dtype=tf.float16),
                                        dtype=tf.float16,
                                        name=name + '_mean')
            with tf.variable_scope('cov'):
                self.cov = tf.Variable(tf.eye(self.DNA_size, dtype=tf.float16),
                                       dtype=tf.float16,
                                       name=name + '_cov')
            self.mvn = MultivariateNormalFullCovariance(loc=self.mean,
                                                        covariance_matrix=abs(
                                                            self.cov))
            self.make_kid = self.mvn.sample(self.N_POP_size)
            self.tfkids_fit = tf.placeholder(tf.float16, [self.C_POP_size])
            self.tfkids = tf.placeholder(tf.float16,
                                         [self.C_POP_size, self.DNA_size])
            self.loss = -tf.reduce_mean(
                self.mvn.log_prob(self.tfkids) * self.tfkids_fit)
            self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
            self.mean_params = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
            self.cov_params = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')

            with tf.name_scope('pull'):
                self.pull_mean_op = self.mean.assign(global_pop.mean)
                self.pull_cov_op = self.cov.assign(global_pop.cov)
            with tf.name_scope('push'):
                self.push_mean_op = global_pop.mean.assign(self.mean)
                self.push_cov_op = global_pop.cov.assign(self.cov)
            with tf.name_scope('restart'):
                # Assign fresh tensors directly: wrapping them in tf.Variable
                # would leave them uninitialized at run time, and the dtype
                # must match the float16 variables defined above.
                self.re_mean_op = self.mean.assign(
                    tf.truncated_normal([self.DNA_size],
                                        mean=0.5,
                                        stddev=0.05,
                                        dtype=tf.float16))
                self.re_cov_op = self.cov.assign(
                    tf.eye(self.DNA_size, dtype=tf.float16))
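The class above only builds graph ops. Below is a minimal driver sketch for one generation, assuming an external TF session, a user-defined fitness function, and that the top C_POP_size offspring by fitness are the ones kept (inferred from the Factor parameter):

import numpy as np

def run_generation(sess, worker, get_fitness):
    # Sample offspring, keep the fittest C_POP_size, update the local
    # distribution, then push it to the global network.
    kids = sess.run(worker.make_kid)                  # (N_POP_size, DNA_size)
    fitness = get_fitness(kids)
    elite = np.argsort(fitness)[-worker.C_POP_size:]  # indices of the best kids
    sess.run(worker.train_op, {worker.tfkids: kids[elite],
                               worker.tfkids_fit: fitness[elite]})
    sess.run([worker.push_mean_op, worker.push_cov_op])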
Example #2
 def __init__(self, name, data, global_pop, bili):
     with tf.variable_scope(name):
         self.name = name
         self.max_fit = 0.0
         self.fit_val = 0.0
         self.dr = 0.0
         self.bili = bili
         self.DNA_size = data.getDNA_size()
         # self.mean_params, self.cov_params, self.mean, self.cov = self._creat_net(name, data.getDNA_size())
         with tf.variable_scope('mean'):
             self.mean = tf.Variable(tf.truncated_normal([
                 self.DNA_size,
             ],
                                                         stddev=0.1,
                                                         mean=0.5),
                                     dtype=tf.float32,
                                     name=name + '_mean')
         with tf.variable_scope('cov'):
             self.cov = tf.Variable(1.0 * tf.eye(self.DNA_size),
                                    dtype=tf.float32,
                                    name=name + '_cov')
         self.mvn = MultivariateNormalFullCovariance(loc=self.mean,
                                                     covariance_matrix=abs(
                                                         self.cov))
         self.make_kid = self.mvn.sample(N_POP)
         self.tfkids_fit = tf.placeholder(tf.float32, [N_POP])
         self.tfkids = tf.placeholder(tf.float32, [N_POP, self.DNA_size])
         # self.loss = -tf.reduce_mean(
         #     self.mvn.log_prob(self.tfkids) * self.tfkids_fit + 0.01 * self.mvn.log_prob(
         #         self.tfkids) * self.mvn.prob(
         #         self.tfkids))
         self.loss = -tf.reduce_mean(
             self.mvn.log_prob(self.tfkids) * 0.04 * (self.tfkids_fit**3))
         self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
         # self.train_op = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
         #     self.loss)  # compute and apply gradients for mean and cov
         self.mean_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
         self.cov_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')
         with tf.name_scope('pull'):
             self.pull_mean_op = self.mean.assign(global_pop.mean)
             self.pull_cov_op = self.cov.assign(global_pop.cov)
             # self.pull_mean_params_op = [l_p.assign(g_p) for l_p, g_p in
             #                             zip(self.mean_params, global_pop.mean_params)]
             # self.pull_cov_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.cov_params, global_pop.cov_params)]
         with tf.name_scope('push'):
             self.push_mean_op = global_pop.mean.assign(self.mean)
             self.push_cov_op = global_pop.cov.assign(self.cov)
Example #3
 def _built_net(self, scope, DNA_SIZE, N_POP):
     mean = tf.Variable(tf.truncated_normal([DNA_SIZE, ], stddev=0.02, mean=0.5), dtype=tf.float32)
     cov = tf.Variable(tf.eye(DNA_SIZE), dtype=tf.float32)
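      # abs() plus a small diagonal jitter (0.001 * I) is intended to keep the
      # covariance matrix positive definite.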
     mvn = MultivariateNormalFullCovariance(loc=mean, covariance_matrix=abs(
          cov + tf.Variable(0.001 * tf.eye(DNA_SIZE), dtype=tf.float32)))
     # make_kid = mvn.sample(N_POP)
     return mvn
Example #4
 def __init__(self, name, pops, pop, sub, sub_size):
     """
     这个类是用来定义全局网络的类
     :param name: 用来表示所在种群全局网络的名称
     :param pops: 用来表示所有的线程
     :param pop: 用来表示所在的种群
     :param sub: 用来表示所在的子集
     :param sub_size: 用来表示所在种群的种群大小
     """
     with tf.variable_scope(name):
         self.name = name
         self.pops = pops
         self.pop = pop
         self.sub = sub
         self.sub_size = sub_size
         with tf.variable_scope('mean'):
             self.mean = tf.Variable(tf.truncated_normal([
                 self.sub_size,
             ],
                                                         stddev=0.05,
                                                         mean=0.5),
                                     dtype=tf.float32,
                                     name=name + '_mean')
         with tf.variable_scope('cov'):
             self.cov = tf.Variable(0.5 * tf.eye(self.sub_size),
                                    dtype=tf.float32,
                                    name=name + '_cov')
         self.mvn = MultivariateNormalFullCovariance(loc=self.mean,
                                                     covariance_matrix=abs(
                                                         self.cov))
         self.mean_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
         self.cov_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')
Example #5
 def __init__(self, name, DNA_size):
     """
     这个类是用来定义全局网络的类
     :param name: 用来表示所在种群全局网络的名称
     :param DNA_size: 用来表示所在种群的种群大小
     """
     with tf.variable_scope(name):
         self.name = name
         self.DNA_size = DNA_size
          with tf.variable_scope('mean'):
              # The initializer dtype must match the float16 variable dtype.
              self.mean = tf.Variable(tf.truncated_normal(
                  [self.DNA_size], mean=0.5, stddev=0.05, dtype=tf.float16),
                                      dtype=tf.float16,
                                      name=name + '_mean')
          with tf.variable_scope('cov'):
              self.cov = tf.Variable(tf.eye(self.DNA_size, dtype=tf.float16),
                                     dtype=tf.float16,
                                     name=name + '_cov')
         self.mvn = MultivariateNormalFullCovariance(loc=self.mean,
                                                     covariance_matrix=abs(
                                                         self.cov))
         self.mean_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
         self.cov_params = tf.get_collection(
             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')
Example #6
class Worker_pop(object):
    def __init__(self, name, data):
        with tf.variable_scope(name):
            self.name = name
            self.DNA_size = data.getDNA_size()
            # self.mean_params, self.cov_params, self.mean, self.cov = self._creat_net(name, data.getDNA_size())
            with tf.variable_scope('mean'):
                self.mean = tf.Variable(tf.truncated_normal([self.DNA_size, ], stddev=0.1, mean=0.5), dtype=tf.float32,
                                        name=name + '_mean')
            with tf.variable_scope('cov'):
                self.cov = tf.Variable(1.0 * tf.eye(self.DNA_size), dtype=tf.float32, name=name + '_cov')
            self.mvn = MultivariateNormalFullCovariance(loc=self.mean, covariance_matrix=abs(self.cov))
            self.make_kid = self.mvn.sample(N_POP)
            self.tfkids_fit = tf.placeholder(tf.float32, [N_POP, ])
            self.tfkids = tf.placeholder(tf.float32, [N_POP, self.DNA_size])
            # self.loss = -tf.reduce_mean(
            #     self.mvn.log_prob(self.tfkids) * self.tfkids_fit + 0.01 * self.mvn.log_prob(
            #         self.tfkids) * self.mvn.prob(
            #         self.tfkids))
            self.loss = -tf.reduce_mean(self.mvn.log_prob(self.tfkids) * self.tfkids_fit)
            self.train_op = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
                self.loss)  # compute and apply gradients for mean and cov
            self.mean_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
            self.cov_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')

    def _update_net(self):
        lock_push.acquire()
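        # NOTE: these assign ops are rebuilt on every call, which grows the
        # graph; building them once in __init__ would avoid that.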
        self.push_mean_params_op = [g_p.assign(l_p) for g_p, l_p in zip(global_pop.mean_params, self.mean_params)]
        self.push_cov_params_op = [g_p.assign(l_p) for g_p, l_p in zip(global_pop.cov_params, self.cov_params)]
        sess.run([self.push_mean_params_op, self.push_cov_params_op])
        # self.update_mean = self.train_op.apply_gradients(zip(self.mean_grads, global_pop.mean_params))
        # self.update_cov = self.train_op.apply_gradients(zip(self.cov_grads, global_pop.cov_params))
        # sess.run([self.update_mean, self.update_cov])  # local grads applies to global net
        lock_push.release()

    def _pull_net(self):
        lock_pull.acquire()
        self.pull_mean_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.mean_params, global_pop.mean_params)]
        self.pull_cov_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.cov_params, global_pop.cov_params)]
        sess.run([self.pull_mean_params_op, self.pull_cov_params_op])
        lock_pull.release()
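A hedged sketch of the thread function that would drive Worker_pop, assuming the module-level sess, global_pop, N_POP, and a user-defined fitness function exist as the class implies; the pull/train/push schedule per generation is an assumption:

def work(worker, get_fitness, n_generations):
    for _ in range(n_generations):
        worker._pull_net()                        # sync local params from the global net
        kids = sess.run(worker.make_kid)          # sample N_POP offspring
        fitness = get_fitness(kids)
        sess.run(worker.train_op, {worker.tfkids: kids,
                                   worker.tfkids_fit: fitness})
        worker._update_net()                      # push local params to the global net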
Example #7
 def __init__(self, name, data):
     with tf.variable_scope(name):
         self.name = name
         self.DNA_size = data.getDNA_size()
         # self.mean_params, self.cov_params, self.mean, self.cov = self._creat_net(name, data.getDNA_size())
         with tf.variable_scope('mean'):
              self.mean = tf.Variable(tf.truncated_normal([self.DNA_size, ], stddev=0.1, mean=0.0), dtype=tf.float32,
                                      name=name + '_mean')
         with tf.variable_scope('cov'):
             self.cov = tf.Variable(1.0 * tf.eye(self.DNA_size), dtype=tf.float32, name=name + '_cov')
         # self.mvn = MultivariateNormalFullCovariance(loc=self.mean, covariance_matrix=self.cov)
         self.mvn = MultivariateNormalFullCovariance(loc=self.mean, covariance_matrix=abs(self.cov))
         # self.mvn = MultivariateNormalFullCovariance(loc=self.mean, covariance_matrix=abs(self.cov + tf.Variable(0.05 * tf.eye(self.DNA_size), dtype=tf.float32)))
         self.make_kid = self.mvn.sample(N_POP)
         self.tfkids_fit = tf.placeholder(tf.float32, [N_POP, ])
         self.tfkids = tf.placeholder(tf.float32, [N_POP, self.DNA_size])
         self.loss = -tf.reduce_mean(
             self.mvn.log_prob(self.tfkids) * self.tfkids_fit + 0.001 * self.mvn.log_prob(self.tfkids) * self.mvn.prob(
                 self.tfkids))
         self.train_op = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(self.loss)  # compute and apply gradients for mean and cov
         self.mean_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/mean')
         self.cov_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name + '/cov')
Example #8
    def __define_observations_simulation(self):
        # TODO: reduce code not to create extra operations

        self.__sim_graph = tf.Graph()
        sim_graph = self.__sim_graph

        r = self.__r
        m = self.__m
        n = self.__n
        p = self.__p

        x0_mean = self.__x0_mean
        x0_cov = self.__x0_cov

        with sim_graph.as_default():

            th = tf.placeholder(tf.float64, shape=[None], name='th')

            # TODO: this should be a continuous function of time,
            # but also try to allow passing an array
            u = tf.placeholder(tf.float64, shape=[r, None], name='u')

            t = tf.placeholder(tf.float64, shape=[None], name='t')

            # TODO: refactor

            # TODO: embed the function itself in the graph; the graph must be
            # rebuilt if the structure of the model changes
            # use tf.convert_to_tensor
            F = tf.convert_to_tensor(self.__F(th), tf.float64)
            F.set_shape([n, n])

            C = tf.convert_to_tensor(self.__C(th), tf.float64)
            C.set_shape([n, r])

            G = tf.convert_to_tensor(self.__G(th), tf.float64)
            G.set_shape([n, p])

            H = tf.convert_to_tensor(self.__H(th), tf.float64)
            H.set_shape([m, n])

            x0_mean = tf.convert_to_tensor(x0_mean(th), tf.float64)
            x0_mean = tf.squeeze(x0_mean)

            x0_cov = tf.convert_to_tensor(x0_cov(th), tf.float64)
            x0_cov.set_shape([n, n])

            x0_dist = MultivariateNormalFullCovariance(x0_mean,
                                                       x0_cov,
                                                       name='x0_dist')

            Q = tf.convert_to_tensor(self.__w_cov(th), tf.float64)
            Q.set_shape([p, p])

            w_mean = self.__w_mean.squeeze()
            w_dist = MultivariateNormalFullCovariance(w_mean, Q, name='w_dist')

            R = tf.convert_to_tensor(self.__v_cov(th), tf.float64)
            R.set_shape([m, m])
            v_mean = self.__v_mean.squeeze()
            v_dist = MultivariateNormalFullCovariance(v_mean, R, name='v_dist')

            def sim_obs(x):
                v = v_dist.sample()
                v = tf.reshape(v, [m, 1])
                y = H @ x + v  # the syntax is valid for Python >= 3.5
                return y

            def sim_loop_cond(x, y, t, k):
                N = tf.stack([tf.shape(t)[0]])
                N = tf.reshape(N, ())
                return tf.less(k, N - 1)

            def sim_loop_body(x, y, t, k):

                # TODO: this should be function of time
                u_t_k = tf.slice(u, [0, k], [r, 1])

                def state_propagate(x, t):
                    w = w_dist.sample()
                    w = tf.reshape(w, [p, 1])
                    Fx = tf.matmul(F, x, name='Fx')
                    Cu = tf.matmul(C, u_t_k, name='Cu')
                    Gw = tf.matmul(G, w, name='Gw')
                    dx = Fx + Cu + Gw
                    return dx

                tk = tf.slice(t, [k], [2], 'tk')

                x_k = x[:, -1]
                x_k = tf.reshape(x_k, [n, 1])

                # relaxed the default tolerance to avoid a "max_num_steps
                # exceeded" error; increasing max_num_steps is an alternative
                x_k = tf.contrib.integrate.odeint(state_propagate,
                                                  x_k,
                                                  tk,
                                                  name='state_propagate',
                                                  rtol=1e-4,
                                                  atol=1e-10)

                x_k = x_k[-1]  # last state (last row)

                y_k = sim_obs(x_k)

                # TODO: stack instead of concat
                x = tf.concat([x, x_k], 1)
                y = tf.concat([y, y_k], 1)

                k = k + 1

                return x, y, t, k

            x = x0_dist.sample(name='x0_sample')
            x = tf.reshape(x, [n, 1], name='x')

            # this zeroth measurement should be thrown away
            y = sim_obs(x)
            k = tf.constant(0, name='k')

            shape_invariants = [
                tf.TensorShape([n, None]),
                tf.TensorShape([m, None]),
                t.get_shape(),
                k.get_shape()
            ]

            sim_loop = tf.while_loop(sim_loop_cond,
                                     sim_loop_body, [x, y, t, k],
                                     shape_invariants,
                                     name='sim_loop')

            self.__sim_loop_op = sim_loop
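A hedged sketch of a companion method (hypothetical, not part of the original class) that runs the simulation loop built above by feeding the named placeholders; theta, u_seq, and t_grid are illustrative argument names:

    def simulate(self, theta, u_seq, t_grid):
        """theta: parameter vector, u_seq: inputs of shape (r, N), t_grid: time grid of shape (N,)."""
        with tf.Session(graph=self.__sim_graph) as sess:
            x_path, y_path, _, _ = sess.run(
                self.__sim_loop_op,
                feed_dict={'th:0': theta, 'u:0': u_seq, 't:0': t_grid})
        # The zeroth measurement is a throwaway (see the comment above).
        return x_path, y_path[:, 1:]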
Example #9
import numpy as np
import tensorflow as tf

from tensorflow.contrib.distributions import MultivariateNormalFullCovariance

rand_init = tf.random_normal_initializer()
mu = tf.get_variable(name="mu",
                     shape=[1, 16],
                     dtype=tf.float32,
                     initializer=rand_init)
log_d = tf.get_variable(name="log_d",
                        shape=[1, 16],
                        dtype=tf.float32,
                        initializer=rand_init)

with tf.name_scope(name="sample"):
    cov = tf.diag(tf.exp(log_d[0]))
    dist = MultivariateNormalFullCovariance(loc=mu, covariance_matrix=cov)
    sample = dist.sample()
    prob = dist.prob(value=sample)

with tf.name_scope("gradient"):
    optimizer = tf.train.AdamOptimizer()
    grads_and_vars = optimizer.compute_gradients(loss=prob, var_list=[log_d])

with tf.name_scope("miscellaneous"):
    init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    res = sess.run(grads_and_vars)
    print(res)
Example #10
#-------------------------- Build the network --------------------------#
''' 
Build a multivariate distribution (a bivariate normal distribution).
There are two random variables, X1 and X2.
The initial cov is tf.eye(DNA_SIZE = 2), a diagonal matrix, meaning X1 and X2 are uncorrelated (which is not the same as independent)!!
normal_dist.sample(POP_SIZE=20) = [x1_1    x1_2
                                   x2_1    x2_2
                                   ...     ...
                                   x20_1   x20_2]
normal_dist.sample(POP_SIZE=20) draws 20 samples from the distribution defined by the given mean and cov, and these 20 samples are fed into the network as training data.
The loss is then differentiated with respect to mean and cov.
'''
mean = tf.Variable(tf.random_normal([DNA_SIZE, ], 5., 1.), dtype=tf.float32, name='mean')
cov = tf.Variable(3. * tf.eye(DNA_SIZE), dtype=tf.float32, name='cov')
normal_dist = MultivariateNormalFullCovariance(loc=mean, covariance_matrix=cov)
make_child = normal_dist.sample(POP_SIZE)  # sample data from the defined normal_dist

childs_fitness_input = tf.placeholder(tf.float32, [POP_SIZE, ])
childs_input = tf.placeholder(tf.float32, [POP_SIZE, DNA_SIZE])
loss = -tf.reduce_mean(normal_dist.log_prob(childs_input) * childs_fitness_input)  # log prob * fitness
train_op = tf.train.GradientDescentOptimizer(LR).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
#-------------------------- Build the network --------------------------#


# draw the contour plot
contour()
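A hedged sketch of the training loop this snippet implies; get_fitness and contour are the author's helpers, assumed to be defined elsewhere along with DNA_SIZE, POP_SIZE, LR, and N_GENERATION:

for generation in range(N_GENERATION):
    childs = sess.run(make_child)                  # sample POP_SIZE children
    fitness = get_fitness(childs)                  # evaluate each child
    sess.run(train_op, {childs_input: childs,
                        childs_fitness_input: fitness})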
Example #11
import os

import numpy as np
from keras.engine.topology import Layer
from keras.layers import Input, Dense
from tensorflow.contrib.distributions import MultivariateNormalFullCovariance
import keras.backend as K
import matplotlib.pyplot as plt


f = np.load(os.path.join("numpy_data", "gmm.npz"))
latent_dim = len(f['pis'])

data = {}
for key in f.keys():
    data[key] = f[key].astype(np.float32)

modes = [None] * latent_dim
for i in range(latent_dim):
    modes[i] = MultivariateNormalFullCovariance(loc=data['means'][i, ...],
                                                covariance_matrix=data['covariances'][i, ...])

data_input = Input(shape=(2,))
q_model = Dense(16, activation='relu')(data_input)
q_model = Dense(16, activation='relu')(q_model)
q_model = Dense(16, activation='relu')(q_model)
q_model = Dense(latent_dim)(q_model)


class ElboLayer(Layer):
    def __init__(self, input_tensor, **kwargs):
        super(ElboLayer, self).__init__(**kwargs)
        self._input = input_tensor

    def call(self, logits):
        # Log-normalize the logits. The original snippet is truncated here;
        # the tile shape below is an assumption.
        norm_logits = logits - K.tile(K.logsumexp(logits, axis=-1, keepdims=True),
                                      (1, latent_dim))
        return norm_logits
Example #12
import tensorflow as tf
from tensorflow.contrib.distributions import MultivariateNormalFullCovariance

DNA_SIZE = 2  # parameter (solution) number
N_POP = 20  # population size
N_GENERATION = 100  # training step
LR = 0.02  # learning rate


# fitness function
def get_fitness(pred):
    return -((pred[:, 0])**2 + pred[:, 1]**2)


# build multivariate distribution
mean = tf.Variable(tf.random_normal([
    2,
], 13., 1.), dtype=tf.float32)
cov = tf.Variable(5. * tf.eye(DNA_SIZE), dtype=tf.float32)
mvn = MultivariateNormalFullCovariance(
    loc=mean, covariance_matrix=cov)  # start build our model
make_kid = mvn.sample(N_POP)  # sampling operation

# compute gradient and update mean and covariance matrix from sample and fitness
tfkids_fit = tf.placeholder(tf.float32, [N_POP])
tfkids = tf.placeholder(tf.float32, [N_POP, DNA_SIZE])
loss = -tf.reduce_mean(
    mvn.log_prob(tfkids) *
    tfkids_fit)  # log prob * fitness; minimizing this maximizes expected fitness
train_op = tf.train.GradientDescentOptimizer(LR).minimize(
    loss)  # compute and apply gradients for mean and cov

sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize tf variables

# something about plotting (can be ignored)
Example #13
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions import MultivariateNormalFullCovariance

DNA_SIZE = 2         # parameter (solution) number
N_POP = 20           # population size
N_GENERATION = 100   # training step
LR = 0.02            # learning rate


# fitness function
def get_fitness(pred): return -((pred[:, 0])**2 + pred[:, 1]**2)

# build multivariate distribution
mean = tf.Variable(tf.random_normal([2, ], 13., 1.), dtype=tf.float32)
cov = tf.Variable(5. * tf.eye(DNA_SIZE), dtype=tf.float32)
mvn = MultivariateNormalFullCovariance(loc=mean, covariance_matrix=cov)
make_kid = mvn.sample(N_POP)                                    # sampling operation

# compute gradient and update mean and covariance matrix from sample and fitness
tfkids_fit = tf.placeholder(tf.float32, [N_POP, ])
tfkids = tf.placeholder(tf.float32, [N_POP, DNA_SIZE])
loss = -tf.reduce_mean(mvn.log_prob(tfkids)*tfkids_fit)         # log prob * fitness
train_op = tf.train.GradientDescentOptimizer(LR).minimize(loss) # compute and apply gradients for mean and cov

sess = tf.Session()
sess.run(tf.global_variables_initializer())                     # initialize tf variables

# something about plotting (can be ignored)
n = 300
x = np.linspace(-20, 20, n)
X, Y = np.meshgrid(x, x)
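A hedged sketch completing this tutorial-style snippet: a contour plot of the fitness landscape followed by the training loop implied by the ops defined above (the plotting details are an assumption):

import matplotlib.pyplot as plt

points = np.stack([X.ravel(), Y.ravel()], axis=1)  # (n*n, 2) grid points
Z = get_fitness(points).reshape(X.shape)
plt.contourf(X, Y, Z, 100)

for g in range(N_GENERATION):
    kids = sess.run(make_kid)                      # sample a population
    kids_fit = get_fitness(kids)                   # evaluate fitness
    sess.run(train_op, {tfkids: kids, tfkids_fit: kids_fit})  # update mean and cov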