Example 1
prev_y = tf.placeholder(
    tf.float32, shape=[None, params.res["height"], params.res["width"]])

prev_y_in = tf.sub(tf.expand_dims(prev_y, 3), 0.5)
x_in = tf.concat(3, [x, prev_y_in])
ch_in = 2

##############################
# Section 1

# Convolution
layer_name = "s1_conv1"
with tf.name_scope(layer_name):
    W = utils.weight_variable([k_size, k_size, ch_in, ch[0]])
    b = utils.bias_variable([ch[0]])
    conv = utils.conv2d(x_in, W, b, 1)

    tanh = tf.nn.tanh(conv)
    s1_conv1 = tf.nn.dropout(tanh, keep_prob)

##############################
# Section 2

# Downsampling convolution
layer_name = "s2_conv1"
with tf.name_scope(layer_name):
    W = utils.weight_variable([k_size, k_size, ch[0], ch[1]])
    b = utils.bias_variable([ch[1]])
    conv = utils.conv2d(s1_conv1, W, b, 2)

    tanh = tf.nn.tanh(conv)
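
The examples call into a utils module that is not shown. The following is a minimal sketch of what weight_variable, bias_variable, and conv2d presumably look like in the same graph-mode TensorFlow style; this is an assumption, and the real helpers may differ (for instance in initialisation or padding).

# utils.py (assumed contents; sketch only, the real module may differ)
import tensorflow as tf

def weight_variable(shape):
    # Convolution kernel with truncated-normal initialisation.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # Small constant bias, one value per output channel.
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W, b, stride):
    # 2-D convolution with SAME padding and the given spatial stride, plus bias.
    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding="SAME") + b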
Example 2
prev_y = tf.placeholder(
    tf.float32, shape=[None, params.res["height"], params.res["width"]])

prev_y_in = tf.sub(tf.expand_dims(prev_y, 3), 0.5)
x_in = tf.concat(3, [x, prev_y_in])
ch_in = 2

##############################
# Section 1

# Convolution
layer_name = "s1_conv1_1"
with tf.name_scope(layer_name):
    W = utils.weight_variable([5, 5, ch_in, ch[0]])
    b = utils.bias_variable([ch[0]])
    conv = utils.conv2d(x_in, W, b, 1)

    tanh = tf.nn.tanh(conv)
    s1_conv1_1 = tf.nn.dropout(tanh, keep_prob)

##############################
# Section 2

# Asymmetric convolution (1x5)
layer_name = "s2_conv1_1"
with tf.name_scope(layer_name):
    W = utils.weight_variable([1, 5, ch[0], ch[1]])
    b = utils.bias_variable([ch[1]])
    conv = utils.conv2d(s1_conv1_1, W, b, 1)

    tanh = tf.nn.tanh(conv)
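
Example 2 factors the kernel into asymmetric convolutions; a 1x5 pass is typically paired with a 5x1 pass so that the two together approximate a full 5x5 receptive field at lower cost. A hypothetical companion layer in the same style is sketched below; the name s2_conv1_2 and its placement are assumptions, not taken from the source.

# Hypothetical 5x1 companion to the 1x5 convolution above (sketch only).
layer_name = "s2_conv1_2"
with tf.name_scope(layer_name):
    W = utils.weight_variable([5, 1, ch[1], ch[1]])
    b = utils.bias_variable([ch[1]])
    conv = utils.conv2d(tanh, W, b, 1)  # takes the tanh output of the 1x5 layer

    tanh = tf.nn.tanh(conv)
    s2_conv1_2 = tf.nn.dropout(tanh, keep_prob)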
Example 3
y_ = tf.placeholder(tf.bool, shape=[None, params.res["height"], params.res["width"]])
prev_y = tf.placeholder(tf.float32, shape=[None, params.res["height"], params.res["width"]])

prev_y_in = tf.sub(tf.expand_dims(prev_y, 3), 0.5)
x_in = tf.concat(3, [x, prev_y_in])
ch_in = 2

##############################
# Section 1

# Convolution
layer_name = "s1_conv1"
with tf.name_scope(layer_name):
  W = utils.weight_variable([k_size, k_size, ch_in, ch[0]])
  b = utils.bias_variable([ch[0]])
  conv = utils.conv2d(x_in, W, b, 1)

  tanh = tf.nn.tanh(conv)
  s1_conv1 = tf.nn.dropout(tanh, keep_prob)


##############################
# Section 2

# Downsampling convolution
layer_name = "s2_conv1"
with tf.name_scope(layer_name):
  W = utils.weight_variable([k_size, k_size, ch[0], ch[1]])
  b = utils.bias_variable([ch[1]])
  conv = utils.conv2d(s1_conv1, W, b, 2)
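
This example also declares the boolean ground-truth placeholder y_ alongside prev_y. A hedged sketch of how one training batch could be fed is shown below; the array names, batch size, dropout value, train_step and sess are hypothetical and do not appear in the source. The single input channel for x is inferred from ch_in = 2 (image plus previous mask).

# Hedged sketch, not from the source: feeding one training batch.
import numpy as np

batch, H, W = 4, params.res["height"], params.res["width"]  # hypothetical batch size
images = np.zeros((batch, H, W, 1), dtype=np.float32)       # input frames (illustrative data)
prev_masks = np.zeros((batch, H, W), dtype=np.float32)      # previous-frame masks in [0, 1]
gt_masks = np.zeros((batch, H, W), dtype=bool)              # ground-truth masks

feed = {x: images, prev_y: prev_masks, y_: gt_masks, keep_prob: 0.75}
# sess.run(train_step, feed_dict=feed)   # train_step and sess are assumed to exist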
Example 4
prev_y = tf.placeholder(
    tf.float32, shape=[None, params.res["height"], params.res["width"]])

prev_y_in = tf.sub(tf.expand_dims(prev_y, 3), 0.5)
x_in = tf.concat(3, [x, prev_y_in])
ch_in = 2

##############################
# Section 1

# Convolution
layer_name = "s1_conv1"
with tf.name_scope(layer_name):
    W = utils.weight_variable([k_size, k_size, ch_in, ch])
    b = utils.bias_variable([ch])
    conv = utils.conv2d(x_in, W, b, 1)

    tanh = tf.nn.tanh(conv)
    s1_conv1 = tf.nn.dropout(tanh, keep_prob)

# Convolution
layer_name = "s1_conv2"
with tf.name_scope(layer_name):
    W = utils.weight_variable([k_size, k_size, ch, ch])
    b = utils.bias_variable([ch])
    conv = utils.conv2d(s1_conv1, W, b, 1)

    tanh = tf.nn.tanh(conv)
    s1_conv2 = tf.nn.dropout(tanh, keep_prob)

# Convolution
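
All of the examples use the pre-1.0 TensorFlow API, in which tf.sub existed and tf.concat took the axis as its first argument. For reference, the shared input preparation written against the TensorFlow 1.x / tf.compat.v1 API would read roughly as follows (a sketch; x and prev_y are as defined above).

# Same input preparation under TensorFlow 1.x / tf.compat.v1
# (tf.sub became tf.subtract; tf.concat takes the values first, then the axis).
prev_y_in = tf.subtract(tf.expand_dims(prev_y, 3), 0.5)  # [N, H, W] -> [N, H, W, 1], centred on 0
x_in = tf.concat([x, prev_y_in], axis=3)                 # stack image and previous-mask channels
ch_in = 2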