# TensorFlow 1.x building blocks for a masked-convolution entropy model.
# `basic_DL_op` is the project's helper module (weight_variable, bias_variable,
# conv2d, conv2d_pad); `lstm_logic` and `cal_cdf` are assumed to be defined
# elsewhere in this module.
import tensorflow as tf

import basic_DL_op


# Residual block whose two 3x3 convolutions are masked so that each output
# position only sees the current pixel and pixels above / to its left.
def mask_2D_resiBlock(x, filter_nums):

    w = basic_DL_op.weight_variable('conv1', [3, 3, filter_nums, filter_nums],
                                    0.01)

    # Keep the top row plus the left and centre taps of the middle row; zero
    # the right tap of the middle row and the whole bottom row, so the
    # convolution only sees the current pixel and pixels above / to its left.
    mask = [[1, 1, 1], [1, 1, 0], [0, 0, 0]]

    mask = tf.reshape(mask, shape=[3, 3, 1, 1])

    mask = tf.tile(mask, multiples=[1, 1, filter_nums, filter_nums])

    mask = tf.cast(mask, dtype=tf.float32)

    w = w * mask

    b = basic_DL_op.bias_variable('bias1', [filter_nums])

    c = basic_DL_op.conv2d(x, w) + b

    c = tf.nn.relu(c)

    w = basic_DL_op.weight_variable('conv2', [3, 3, filter_nums, filter_nums],
                                    0.01)

    w = w * mask

    b = basic_DL_op.bias_variable('bias2', [filter_nums])

    c = basic_DL_op.conv2d(c, w) + b

    return x + c
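

# Illustrative check (an addition, not part of the original code): the mask
# above keeps the top row plus the left and centre taps of the middle row, so
# each output position only sees the current pixel and pixels already produced
# in raster-scan order. `_show_resi_mask` is a hypothetical helper that makes
# the zero pattern visible with NumPy; the broadcast mimics the tf.tile above.
def _show_resi_mask(filter_nums=4):
    import numpy as np
    mask = np.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]], dtype=np.float32)
    kernel = np.random.randn(3, 3, filter_nums, filter_nums).astype(np.float32)
    masked = kernel * mask[:, :, None, None]
    print(mask)                 # the causal footprint
    print(masked[:, :, 0, 0])   # same zero pattern on one channel slice
    return masked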

# Three 3x3 tanh convolutions with a skip connection from the first
# convolution's output, mapping a 1-channel input to a 1-channel prediction.
def p_block(x):

    w = basic_DL_op.weight_variable('conv1', [3, 3, 1, 16], 0.01)
    b = basic_DL_op.bias_variable('bias1', [16])
    tmp = basic_DL_op.conv2d_pad(x, w) + b

    conv1 = tmp

    tmp = tf.nn.tanh(tmp)
    w = basic_DL_op.weight_variable('conv2', [3, 3, 16, 16], 0.01)
    b = basic_DL_op.bias_variable('bias2', [16])
    tmp = basic_DL_op.conv2d_pad(tmp, w) + b

    tmp = tf.nn.tanh(tmp)
    w = basic_DL_op.weight_variable('conv3', [3, 3, 16, 16], 0.01)
    b = basic_DL_op.bias_variable('bias3', [16])
    tmp = basic_DL_op.conv2d_pad(tmp, w) + b

    tmp = conv1 + tmp

    w = basic_DL_op.weight_variable('conv4', [3, 3, 16, 1], 0.01)
    b = basic_DL_op.bias_variable('bias4', [1])
    out = basic_DL_op.conv2d_pad(tmp, w) + b

    return out
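

# Hypothetical usage sketch (an addition, assuming `basic_DL_op.conv2d_pad` is
# a stride-1, 'SAME'-padded 2D convolution): p_block maps a single-channel
# NHWC tensor to a single-channel prediction. The variable scope keeps its
# hard-coded variable names ('conv1', 'bias1', ...) from clashing with other
# layers in the same graph.
def _p_block_demo():
    inp = tf.placeholder(tf.float32, shape=[1, 64, 64, 1], name='p_in')
    with tf.variable_scope('p_block_demo'):
        pred = p_block(inp)  # -> [1, 64, 64, 1]
    return pred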

# Plain (unmasked) residual block used on the context branch.
def resiBlock_2D_context(x, filter_nums):

    w = basic_DL_op.weight_variable('conv1_c',
                                    [3, 3, filter_nums, filter_nums], 0.01)

    b = basic_DL_op.bias_variable('bias1_c', [filter_nums])

    c = basic_DL_op.conv2d(x, w) + b

    c = tf.nn.relu(c)

    w = basic_DL_op.weight_variable('conv2_c',
                                    [3, 3, filter_nums, filter_nums], 0.01)

    b = basic_DL_op.bias_variable('bias2_c', [filter_nums])

    c = basic_DL_op.conv2d(c, w) + b

    return x + c

# One convolutional LSTM layer: 3x3 convolutions over the input and the
# previous hidden state; the gate computations themselves live in
# `lstm_logic` (defined elsewhere).
def lstm_layer(x, h, c, in_num, out_num):

    # the first layer: input

    w = basic_DL_op.weight_variable('conv1', [3, 3, in_num, out_num], 0.01)

    x = basic_DL_op.conv2d(x, w)

    # the first layer: state

    w = basic_DL_op.weight_variable('conv2', [3, 3, out_num, out_num], 0.01)

    h = basic_DL_op.conv2d(h, w)

    b = basic_DL_op.bias_variable('bias', [out_num])

    c, h = lstm_logic(x + h + b, c)

    return c, h
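

# Hypothetical usage sketch (an addition): `lstm_logic` is defined elsewhere
# in this project, so this only shows how the layer is typically wired --
# hidden and cell states zero-initialised with `out_num` channels and re-used
# across timesteps under a shared variable scope. Shapes are illustrative.
def _lstm_layer_demo(steps=3, in_num=1, out_num=16):
    x = tf.placeholder(tf.float32, shape=[1, 32, 32, in_num], name='lstm_in')
    h = tf.zeros([1, 32, 32, out_num])
    c = tf.zeros([1, 32, 32, out_num])
    for _ in range(steps):
        with tf.variable_scope('lstm', reuse=tf.AUTO_REUSE):
            c, h = lstm_layer(x, h, c, in_num, out_num)
    return c, h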

# 2x spatial upsampling with a 3x3 transposed convolution. `output_shape` is
# built from the static shape, so the batch size must be known when the graph
# is constructed (a dynamic-shape variant is sketched below).
def deconv_layer(x):

    x_shape = x.get_shape().as_list()

    kernel = basic_DL_op.weight_variable('deconv',
                                         [3, 3, x_shape[3], x_shape[3]], 0.01)

    x = tf.nn.conv2d_transpose(x,
                               kernel,
                               output_shape=[
                                   x_shape[0],
                                   int(x_shape[1] * 2),
                                   int(x_shape[2] * 2), x_shape[3]
                               ],
                               strides=[1, 2, 2, 1],
                               padding="SAME")

    return x
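

# Note / hypothetical variant (an addition): `deconv_layer` builds
# `output_shape` from `get_shape().as_list()`, so the batch dimension must be
# statically known. If the batch size comes from a placeholder shaped
# [None, H, W, C], a dynamic-shape variant along these lines would be needed:
def _deconv_layer_dynamic(x):
    ch = x.get_shape().as_list()[3]  # channel count must still be static
    dyn = tf.shape(x)                # dynamic N, H, W taken at run time
    kernel = basic_DL_op.weight_variable('deconv', [3, 3, ch, ch], 0.01)
    out_shape = tf.stack([dyn[0], dyn[1] * 2, dyn[2] * 2, ch])
    return tf.nn.conv2d_transpose(x,
                                  kernel,
                                  output_shape=out_shape,
                                  strides=[1, 2, 2, 1],
                                  padding="SAME")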

# Masked-convolution entropy model: for every position, predicts the
# parameters of a CDF over the quantised value at that position, conditioned
# on previously decoded pixels (masked main branch) and on side information
# (context branch), and returns the mean cross-entropy in nats.
def mask_2D_layer(x,
                  static_QP,
                  context,
                  features=128,
                  resi_num=2,
                  para_num=58):

    # Scale the input by the QP and keep a copy as the label whose
    # probability is evaluated at the end.
    x = x / static_QP
    label = x

    # x = tf.stop_gradient(x)

    ################## layer 1, linear

    w = basic_DL_op.weight_variable('conv1', [3, 3, 1, features], 0.01)
    w_c = basic_DL_op.weight_variable('conv1_c', [3, 3, 1, features], 0.01)

    # First-layer mask: the centre tap is excluded as well, so the model never
    # sees the value it is predicting, only pixels that precede it in
    # raster-scan order.
    mask = [[1, 1, 1], [1, 0, 0], [0, 0, 0]]

    mask = tf.reshape(mask, shape=[3, 3, 1, 1])

    mask = tf.tile(mask, multiples=[1, 1, 1, features])

    mask = tf.cast(mask, dtype=tf.float32)

    w = w * mask

    b = basic_DL_op.bias_variable('bias1', [features])
    b_c = basic_DL_op.bias_variable('bias1_c', [features])

    x = basic_DL_op.conv2d(x, w) + b
    context = basic_DL_op.conv2d(context, w_c) + b_c

    conv1 = x
    x = x + context

    ################## layers: resi_num resi_block

    for i in range(resi_num):
        with tf.variable_scope('resi_block' + str(i)):

            x = mask_2D_resiBlock(x, features)

            context = resiBlock_2D_context(context, features)

            x = x + context

    x = conv1 + x

    ################# conv: after skip connection, relu

    w = basic_DL_op.weight_variable('conv2', [3, 3, features, features], 0.01)

    # Later masks may include the centre tap, since by this point it only
    # carries information derived from already-decoded pixels.
    mask = [[1, 1, 1], [1, 1, 0], [0, 0, 0]]

    mask = tf.reshape(mask, shape=[3, 3, 1, 1])

    mask = tf.tile(mask, multiples=[1, 1, features, features])

    mask = tf.cast(mask, dtype=tf.float32)

    w = w * mask

    b = basic_DL_op.bias_variable('bias2', [features])

    x = basic_DL_op.conv2d(x, w) + b

    x = tf.nn.relu(x)

    ################# convs: 1x1, relu/linear

    w = basic_DL_op.weight_variable('conv3', [1, 1, features, features], 0.01)

    b = basic_DL_op.bias_variable('bias3', [features])

    x = basic_DL_op.conv2d(x, w) + b

    x = tf.nn.relu(x)

    w = basic_DL_op.weight_variable('conv4', [1, 1, features, features], 0.01)

    b = basic_DL_op.bias_variable('bias4', [features])

    x = basic_DL_op.conv2d(x, w) + b

    x = tf.nn.relu(x)

    w = basic_DL_op.weight_variable('conv5', [1, 1, features, para_num], 0.01)

    b = basic_DL_op.bias_variable('bias5', [para_num])

    x = basic_DL_op.conv2d(x, w) + b

    ################# cal the cdf with the output params

    # Split the para_num output channels into the three CDF parameter groups:
    # h is made positive via softplus and a is squashed into (-1, 1).
    h = tf.nn.softplus(x[:, :, :, 0:33])
    b = x[:, :, :, 33:46]
    a = tf.tanh(x[:, :, :, 46:58])

    # Probability mass of the quantised symbol: the CDF evaluated half a
    # quantisation step above and below the (scaled) label.
    lower = label - 0.5 / static_QP
    high = label + 0.5 / static_QP

    lower = cal_cdf(lower, h, b, a)
    high = cal_cdf(high, h, b, a)

    # Clamp away from zero so the log below stays finite.
    prob = tf.maximum((high - lower), 1e-9)

    cross_entropy = -tf.reduce_mean(tf.log(prob))

    return cross_entropy
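

# Hypothetical wiring sketch (an addition): mask_2D_layer returns a scalar
# cross-entropy, so training amounts to minimising it directly. `cal_cdf` and
# the data pipeline are defined elsewhere; the shapes, QP value and optimiser
# below are illustrative assumptions, not the authors' training setup.
def _entropy_model_demo():
    resi = tf.placeholder(tf.float32, shape=[4, 64, 64, 1], name='residual')
    ctx = tf.placeholder(tf.float32, shape=[4, 64, 64, 1], name='context')
    with tf.variable_scope('entropy_model'):
        cross_entropy = mask_2D_layer(resi, static_QP=1.0, context=ctx)
    train_op = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    return cross_entropy, train_op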