def enc_up(x, c, init=False, dropout_p=0.5, n_scales=1, n_residual_blocks=2,
           activation="elu", n_filters=64, max_filters=128):
    """Bottom-up encoder: return a list of feature maps, one per scale.

    Parameters
    ----------
    x : input tensor.
    c : accepted but UNUSED — the original concat of x and c was disabled
        (see the commented line below); kept only for caller compatibility.
    init : forwarded to model_arg_scope (data-dependent init flag).
    dropout_p : dropout probability forwarded to model_arg_scope.
    n_scales : number of resolution levels; one output per level.
    n_residual_blocks : residual blocks applied at each level.
    activation : activation name forwarded to model_arg_scope.
    n_filters : channel count at the finest scale; doubled per level.
    max_filters : upper bound on the channel count.

    Returns
    -------
    list of per-scale feature tensors, finest first.
    """
    with model_arg_scope(init=init, dropout_p=dropout_p, activation=activation):
        # collected per-scale outputs
        hs = []
        # prepare input; conditioning input was originally concatenated here
        # but that path is disabled:
        # xc = tf.concat([x, c], axis=-1)
        xc = x
        h = nn.nin(xc, n_filters)
        for l in range(n_scales):
            # level module: residual blocks at the current resolution
            for i in range(n_residual_blocks):
                h = nn.residual_block(h)
            hs.append(h)
            # prepare input to next (coarser) level
            if l + 1 < n_scales:
                # channels double per level, capped at max_filters
                n_filters = min(2 * n_filters, max_filters)
                h = nn.downsample(h, n_filters)
        return hs
def enc_up(
    x,
    c,
    init=False,
    dropout_p=0.5,
    n_scales=1,
    n_residual_blocks=2,
    activation="elu",
    n_filters=64,
    max_filters=128,
):
    """Bottom-up encoder: return a list of feature maps, one per scale.

    ``c`` is actually not used (the concat with ``x`` is disabled below);
    it is kept only so existing callers keep working.

    Parameters
    ----------
    x : input tensor.
    c : unused conditioning input (interface compatibility only).
    init, dropout_p, activation : forwarded to model_arg_scope.
    n_scales : number of resolution levels; one output per level.
    n_residual_blocks : residual blocks per level.
    n_filters : channels at the finest scale, doubled per level.
    max_filters : upper bound on the channel count.

    Returns
    -------
    list of per-scale feature tensors, finest first.
    """
    # NOTE: the "c is actually not used" note was originally a bare string
    # expression inside the with-block (a no-op); it now lives in the
    # docstring where tooling can see it.
    with model_arg_scope(init=init, dropout_p=dropout_p, activation=activation):
        # outputs
        hs = []
        # prepare input; conditioning concat is disabled:
        # xc = tf.concat([x,c], axis = -1)
        xc = x
        h = nn.nin(xc, n_filters)
        for l in range(n_scales):
            # level module
            for i in range(n_residual_blocks):
                h = nn.residual_block(h)
            hs.append(h)
            # prepare input to next level
            if l + 1 < n_scales:
                n_filters = min(2 * n_filters, max_filters)
                h = nn.downsample(h, n_filters)
        return hs
def dec_up(
    c,
    init=False,
    dropout_p=0.5,
    n_scales=1,
    n_residual_blocks=2,
    activation="elu",
    n_filters=64,
    max_filters=128,
):
    """Bottom-up decoder branch: encode ``c`` into one feature map per scale.

    Structure mirrors ``enc_up`` but takes only the conditioning tensor.
    Channels double at each level, capped at ``max_filters``.

    Returns
    -------
    list of per-scale feature tensors, finest first.
    """
    with model_arg_scope(init=init, dropout_p=dropout_p, activation=activation):
        outputs = []
        features = nn.nin(c, n_filters)
        for level in range(n_scales):
            # residual stack at this resolution
            for _ in range(n_residual_blocks):
                features = nn.residual_block(features)
            outputs.append(features)
            # move to the next, coarser level (skip after the last one)
            is_last_level = level + 1 >= n_scales
            if not is_last_level:
                n_filters = min(2 * n_filters, max_filters)
                features = nn.downsample(features, n_filters)
        return outputs
def dec_up(c, init=False, dropout_p=0.5, n_scales=1, n_residual_blocks=2,
           activation="elu", n_filters=64, max_filters=128):
    """Bottom-up decoder branch: one feature map per scale from ``c``.

    Same multi-scale layout as ``enc_up``: residual blocks at each level,
    then a strided downsample with the channel count doubled (capped at
    ``max_filters``) before the next level.

    Returns
    -------
    list of per-scale feature tensors, finest first.
    """
    with model_arg_scope(init=init, dropout_p=dropout_p, activation=activation):
        collected = []
        feat = nn.nin(c, n_filters)
        for scale_idx in range(n_scales):
            for _ in range(n_residual_blocks):
                feat = nn.residual_block(feat)
            collected.append(feat)
            if scale_idx + 1 < n_scales:
                # grow channels for the coarser level, capped at max_filters
                n_filters = min(2 * n_filters, max_filters)
                feat = nn.downsample(feat, n_filters)
        return collected
def cfn(x, init=False, dropout_p=0.5, n_scales=1, n_residual_blocks=2,
        activation="elu", n_filters=64, max_filters=128):
    """Multi-scale feature network with a final global (1x1) summary.

    Runs the same per-scale residual/downsample stack as ``enc_up``, then
    flattens the coarsest map to spatial size 1x1 and projects it to
    ``2 * max_filters`` channels; that summary is appended as the last
    list entry.

    Returns
    -------
    list of per-scale feature tensors, finest first, with the flattened
    1x1 summary tensor appended at the end.
    """
    with model_arg_scope(init=init, dropout_p=dropout_p, activation=activation):
        outputs = []
        feat = nn.nin(x, n_filters)
        for level in range(n_scales):
            for _ in range(n_residual_blocks):
                feat = nn.residual_block(feat)
            outputs.append(feat)
            if level + 1 < n_scales:
                n_filters = min(2 * n_filters, max_filters)
                feat = nn.downsample(feat, n_filters)
        # collapse the final map to 1x1 spatially, keeping all values in
        # the channel axis, then project to 2*max_filters channels.
        # NOTE(review): uses static shapes via shape.as_list() — assumes
        # batch and spatial dims are known at graph-build time; confirm.
        b, h_dim, w_dim, ch = feat.shape.as_list()
        feat = tf.reshape(feat, [b, 1, 1, h_dim * w_dim * ch])
        feat = nn.nin(feat, 2 * max_filters)
        outputs.append(feat)
        return outputs