"""
houses neural network layers
"""
import tensorflow as tf
from utils import _variable_with_weight_decay, _variable_on_cpu, _activation_summary
from tensorflow.python.ops import control_flow_ops


def conv_layer(state_below, scope_name, n_inputs, n_outputs, filter_shape, stddev, wd):
    """
    A standard convolutional layer.
    """
    with tf.variable_scope(scope_name) as scope:
        kernel = _variable_with_weight_decay(
            "weights", shape=[filter_shape[0], filter_shape[1], n_inputs, n_outputs],
            stddev=stddev, wd=wd
        )
        conv = tf.nn.conv2d(state_below, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu("biases", [n_outputs], tf.constant_initializer(0.0))
        # Add one bias per output channel.
        bias = tf.nn.bias_add(conv, biases)
        output = tf.nn.relu(bias, name=scope.name)
        _activation_summary(output)
    return output
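

# Usage sketch (illustrative, not part of the original module): two stacked
# conv layers over a batch of 28x28 grayscale images. `images`, the 5x5
# filter sizes, and the stddev/wd values are hypothetical; `n_inputs` must
# match the channel count of `state_below`.
#
#   images = tf.placeholder(tf.float32, [None, 28, 28, 1])
#   conv1 = conv_layer(images, "conv1", n_inputs=1, n_outputs=32,
#                      filter_shape=(5, 5), stddev=0.1, wd=1e-4)
#   conv2 = conv_layer(conv1, "conv2", n_inputs=32, n_outputs=64,
#                      filter_shape=(5, 5), stddev=0.1, wd=1e-4)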


def reshape_conv_layer(state_below):
    """
    Reshapes a conv layer's activations to be linear, flattening every
    dimension except the batch. Assumes the batch dimension is 0 and that
    all other dimensions are statically known.
    """
    dims = state_below.get_shape().as_list()
    # Get the batch size back as a tensor so it can stay dynamic.
    batch_size = tf.shape(state_below)[0]
    conv_dims = dims[1:]
    dim = 1
    for d in conv_dims:
        dim *= d
    # tf.stack was named tf.pack before TensorFlow 1.0.
    reshape = tf.reshape(state_below, tf.stack([batch_size, dim]))
    return reshape, dim
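

# Usage sketch (illustrative): flattening conv activations before a linear
# layer. With the hypothetical `conv2` above ([None, 28, 28, 64] under SAME
# padding), `dim` comes back as 28 * 28 * 64 = 50176 while the batch size
# stays dynamic in the graph.
#
#   flat, dim = reshape_conv_layer(conv2)   # flat: [None, 50176]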


def linear_layer(state_below, scope_name, n_inputs, n_outputs, stddev, wd, use_nonlinearity=True):
    """
    A standard fully connected (linear) layer, with an optional ReLU.
    """
    with tf.variable_scope(scope_name) as scope:
        weights = _variable_with_weight_decay(
            'weights', [n_inputs, n_outputs],
            stddev=stddev, wd=wd
        )
        biases = _variable_on_cpu(
            'biases', [n_outputs], tf.constant_initializer(0.0)
        )
        activation = tf.nn.xw_plus_b(state_below, weights, biases, name="activation")
        if use_nonlinearity:
            output = tf.nn.relu(activation, name=scope.name)
        else:
            output = activation
        _activation_summary(output)
    return output
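

# Usage sketch (illustrative): a hidden layer followed by a linear readout.
# The readout disables the ReLU so a softmax/loss op can consume raw logits.
# `flat`, `dim`, and `n_classes` are hypothetical names carried over from the
# sketches above, and the layer sizes and stddev/wd values are placeholders.
#
#   hidden = linear_layer(flat, "fc1", n_inputs=dim, n_outputs=384,
#                         stddev=0.04, wd=1e-4)
#   logits = linear_layer(hidden, "softmax_linear", n_inputs=384,
#                         n_outputs=n_classes, stddev=1.0 / 384,
#                         wd=0.0, use_nonlinearity=False)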