def conv_layer(self, bottom, name):
    # Convolution layer rebuilt from pre-trained weights: data_dict[name][0]
    # holds the filter kernel, data_dict[name][1] the biases.
    with tf.variable_scope(name):
        conv = tf.nn.conv2d(bottom,
                            self.data_dict[name][0], [1, 1, 1, 1],
                            padding='SAME')
        lout = tf.nn.relu(tf.nn.bias_add(conv,
                                         self.data_dict[name][1]))
        return lout
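A minimal usage sketch for the layer above. The stand-in class _Demo, the layer name 'conv1_1', and the fabricated weight shapes are assumptions for illustration; real code loads a pre-trained weight dump into data_dict.

import numpy as np
import tensorflow as tf

class _Demo:
    # Stand-in host class: data_dict maps layer names to [kernel, bias].
    def __init__(self):
        self.data_dict = {'conv1_1': [
            np.random.randn(3, 3, 3, 64).astype(np.float32),  # fabricated kernel
            np.zeros(64, dtype=np.float32)]}                  # fabricated bias
    conv_layer = conv_layer  # attach the function defined above as a method

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
out = _Demo().conv_layer(images, 'conv1_1')  # -> shape (?, 224, 224, 64)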
Example #2
def _build_c(self, s, a, reuse=None, custom_getter=None):
    # Critic network Q(s, a); variables are frozen (trainable=False) when the
    # scope is reused, e.g. for a target network built through a custom_getter.
    trainable = True if reuse is None else False
    with tf.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):
        n_l1 = 30
        w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
        w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
        b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
        net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
        return tf.layers.dense(net, 1, trainable=trainable)  # Q(s, a)
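A hedged sketch of how a builder like _build_c is typically called in DDPG: once for the trainable eval critic, and once with reuse=True plus an exponential-moving-average custom_getter for the frozen target critic. The dimensions and the simplified build_c body below are illustrative assumptions, not the original class.

import tensorflow as tf

s = tf.placeholder(tf.float32, [None, 3], name='s')
a = tf.placeholder(tf.float32, [None, 1], name='a')

def build_c(s, a, reuse=None, custom_getter=None):
    trainable = reuse is None
    with tf.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):
        return tf.layers.dense(tf.concat([s, a], axis=1), 1, trainable=trainable)

q = build_c(s, a)                                    # eval critic, trainable
ema = tf.train.ExponentialMovingAverage(decay=0.999)
target_update = ema.apply(tf.trainable_variables())  # maintain shadow copies

def ema_getter(getter, name, *args, **kwargs):
    # Hand back the moving average of each variable instead of the variable.
    return ema.average(getter(name, *args, **kwargs))

q_ = build_c(s, a, reuse=True, custom_getter=ema_getter)  # frozen target critic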
Example #3
def convnet(input, filter_dim, padding, k=5, stride=2, name="convnet"):
    # k x k convolution with filter_dim output channels and the given stride.
    with tf.variable_scope(name) as scope:
        weight = tf.get_variable(
            "Weight", [k, k, input.get_shape().as_list()[-1], filter_dim],
            initializer=tf.random_normal_initializer(stddev=0.02))
        bias = tf.get_variable("bias", [filter_dim],
                               initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(input,
                            weight,
                            strides=[1, stride, stride, 1],
                            padding=padding)
        # bias_add drops static shape info; the reshape restores it.
        conv = tf.reshape(tf.nn.bias_add(conv, bias), conv.get_shape())
        return conv
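A usage sketch, assuming the convnet above is in scope; the input shape and layer names are illustrative.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
h1 = convnet(images, filter_dim=64, padding='SAME', name='conv1')  # (?, 32, 32, 64)
h2 = convnet(h1, filter_dim=128, padding='SAME', name='conv2')     # (?, 16, 16, 128)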
Example #4
	def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):
		self.n_steps = n_steps
		self.input_size = input_size
		self.output_size = output_size
		self.cell_size = cell_size
		self.batch_size = batch_size
		with tf.name_scope('inputs'):
			self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size])
			self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size])
		with tf.variable_scope('in_hidden'):
			self.add_input_layer()
		with tf.variable_scope('LSTM_cell'):
			self.add_cell()
		with tf.variable_scope('output_hidden'):
			self.add_output_layer()
		with tf.name_scope('cost'):
			self.compute_cost()
		with tf.name_scope('train'):
			# LR is assumed to be a module-level learning-rate constant.
			self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)
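The helper methods called above (add_input_layer, add_cell, add_output_layer, compute_cost) are not shown. As one plausible sketch of what add_cell builds, here is an LSTM unrolled with tf.nn.dynamic_rnn; the shapes are illustrative assumptions.

import tensorflow as tf

cell_size, batch_size, n_steps = 10, 50, 20
inputs = tf.placeholder(tf.float32, [batch_size, n_steps, cell_size])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(cell_size, state_is_tuple=True)
init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(
    lstm_cell, inputs, initial_state=init_state, time_major=False)
# outputs: (batch, n_steps, cell_size); final_state: LSTMStateTuple(c, h)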
Example #5
	def _build_RNN(self):
		with tf.variable_scope('inputs'):
			self._xs = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._input_size],name='xs')
			self._ys = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._output_size], name='ys')
		with tf.name_scope('RNN'):
			l_in_x = tf.reshape(self._xs, [-1, self._input_size], name='2_2D')
			Wi = self._weight_variable([self._input_size, self._cell_size])
			#print (Wi.name)
			bi = self._bias_variable([self._cell_size])
			with tf.name_scope('Wx_plus_b'):
				l_in_y = tf.matmul(l_in_x, Wi) + bi 
			l_in_y = tf.reshape(l_in_y, [-1, self._time_steps, self._cell_size])

		with tf.variable_scope('cell'):
			cell = tf.contrib.rnn.BasicLSTMCell(self._cell_size)
			with tf.name_scope('initial_state'):
				self._cell_initial_state = cell.zero_state(self._batch_size, tf.float32)
				
				self.cell_outputs = [] 
				cell_state = self._cell_initial_state
				for t in range(self._time_steps):
					if t > 0:
						# Share the LSTM weights across time steps.
						tf.get_variable_scope().reuse_variables()
					cell_output, cell_state = cell(l_in_y[:, t, :], cell_state)
					self.cell_outputs.append(cell_output)
				self._cell_final_state = cell_state 

		with tf.variable_scope('output_layer'):
			# Stack the per-step outputs into (batch, steps, cell_size), then
			# flatten to 2-D for the output projection.
			cell_outputs_reshaped = tf.reshape(
				tf.transpose(self.cell_outputs, [1, 0, 2]), [-1, self._cell_size])
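			# The snippet breaks off here; a plausible completion using the
			# _weight_variable/_bias_variable helpers seen above (the attribute
			# name self._pred is an assumption).
			Wo = self._weight_variable([self._cell_size, self._output_size])
			bo = self._bias_variable([self._output_size])
			self._pred = tf.matmul(cell_outputs_reshaped, Wo) + bo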
Example #6
def inference(images):
    # Conv1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[5, 5, 3, 64],
            stddev=5e-2,
            wd=0.0
        )
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    # Pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    # Norm1
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[5, 5, 64, 64],
            stddev=5e-2,
            wd=0.0
        )
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)

    # norm2
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    # local3
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        _activation_summary(local3)

    # local4
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)

    # linear layer
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
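A hedged usage sketch for inference; it assumes the surrounding file defines FLAGS.batch_size, NUM_CLASSES, and the _variable_with_weight_decay / _variable_on_cpu / _activation_summary helpers (as in the CIFAR-10 tutorial this resembles). The input shape is illustrative.

import tensorflow as tf

images = tf.placeholder(tf.float32, [FLAGS.batch_size, 24, 24, 3])  # CIFAR-style crops
logits = inference(images)  # -> (FLAGS.batch_size, NUM_CLASSES), unscaled scores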
Example #7
def _build_s(self, s, reuse=None, custom_getter=None):
    # Actor network: maps state s to an action scaled into [-a_bound, a_bound].
    trainable = True if reuse is None else False
    with tf.variable_scope('Actor', reuse=reuse, custom_getter=custom_getter):
        net = tf.layers.dense(s, 20, activation=tf.nn.relu, name='l1', trainable=trainable)
        a = tf.layers.dense(net, 30, activation=tf.nn.tanh, name='a', trainable=trainable)
        return tf.multiply(a, self.a_bound, name='scaled_a')
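A self-contained sketch of the bounding trick used on the last line: tanh squashes the action into [-1, 1] and multiplying by a_bound rescales it to the environment's range. Shapes and the bound value are illustrative assumptions.

import tensorflow as tf

a_bound = 2.0                                  # e.g. a torque limit of 2
s = tf.placeholder(tf.float32, [None, 3])
net = tf.layers.dense(s, 20, activation=tf.nn.relu)
a = tf.layers.dense(net, 1, activation=tf.nn.tanh)
scaled_a = tf.multiply(a, a_bound, name='scaled_a')  # actions in [-2, 2]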