def mask_2D_resiBlock(x, filter_nums):
    """Masked residual block: two masked 3x3 convs with a skip connection.

    The binary mask zeroes the lower/right taps of each 3x3 kernel (centre
    tap kept), so each conv only sees neighbours that precede the current
    position in raster-scan order.
    """
    # Build the [3, 3, filter_nums, filter_nums] kernel mask once; both
    # convolutions in the block share the same causal pattern.
    taps = [[1, 1, 1],
            [1, 1, 0],
            [0, 0, 0]]
    kernel_mask = tf.reshape(taps, shape=[3, 3, 1, 1])
    kernel_mask = tf.tile(kernel_mask, multiples=[1, 1, filter_nums, filter_nums])
    kernel_mask = tf.cast(kernel_mask, dtype=tf.float32)

    # First masked conv + ReLU.
    w1 = basic_DL_op.weight_variable('conv1', [3, 3, filter_nums, filter_nums], 0.01)
    b1 = basic_DL_op.bias_variable('bias1', [filter_nums])
    out = tf.nn.relu(basic_DL_op.conv2d(x, w1 * kernel_mask) + b1)

    # Second masked conv, linear.
    w2 = basic_DL_op.weight_variable('conv2', [3, 3, filter_nums, filter_nums], 0.01)
    b2 = basic_DL_op.bias_variable('bias2', [filter_nums])
    out = basic_DL_op.conv2d(out, w2 * kernel_mask) + b2

    # Residual connection.
    return x + out
def resiBlock_2D_context(x, filter_nums):
    """Plain (unmasked) 3x3 residual block used on the context branch.

    conv -> ReLU -> conv, then add the input back (identity skip).
    """
    w1 = basic_DL_op.weight_variable('conv1_c', [3, 3, filter_nums, filter_nums], 0.01)
    b1 = basic_DL_op.bias_variable('bias1_c', [filter_nums])
    hidden = tf.nn.relu(basic_DL_op.conv2d(x, w1) + b1)

    w2 = basic_DL_op.weight_variable('conv2_c', [3, 3, filter_nums, filter_nums], 0.01)
    b2 = basic_DL_op.bias_variable('bias2_c', [filter_nums])
    out = basic_DL_op.conv2d(hidden, w2) + b2

    return x + out
def lstm_layer(x, h, c, in_num, out_num):
    """One convolutional LSTM layer.

    A 3x3 conv is applied to the input `x` and another to the hidden state
    `h`; their sum (plus bias) is fed with the cell state `c` into
    `lstm_logic`, which returns the updated (c, h) pair.
    """
    # Input-to-state and state-to-state convolutions.
    w_in = basic_DL_op.weight_variable('conv1', [3, 3, in_num, out_num], 0.01)
    w_state = basic_DL_op.weight_variable('conv2', [3, 3, out_num, out_num], 0.01)
    bias = basic_DL_op.bias_variable('bias', [out_num])

    gates = basic_DL_op.conv2d(x, w_in) + basic_DL_op.conv2d(h, w_state) + bias
    c, h = lstm_logic(gates, c)
    return c, h
def mask_2D_layer(x, static_QP, context, features=128, resi_num=2, para_num=58):
    """Masked-conv entropy model: predicts a CDF for each sample of `x` from
    its causal neighbourhood plus a `context` tensor, and returns the mean
    cross-entropy (negative log-likelihood) of `x` under that model.

    Args:
        x: input tensor to be modelled (values are divided by static_QP).
        static_QP: quantisation step used both to normalise `x` and to set
            the +/- 0.5/static_QP integration interval around each label.
        context: side-information tensor, same spatial size as `x`
            (1 channel in — see the [3, 3, 1, features] conv1_c kernel).
        features: channel width of the hidden layers.
        resi_num: number of (masked-resiBlock + context-resiBlock) stages.
        para_num: channel count of the final 1x1 conv output.
            NOTE(review): the slices below (0:33, 33:46, 46:58) are
            hard-coded and only consistent with the default para_num=58 —
            confirm before calling with another value.

    Returns:
        Scalar cross-entropy: -mean(log(P(x))) over all elements.
    """
    x = x / static_QP
    # Keep the normalised values as the labels whose likelihood is computed.
    label = x
    # x = tf.stop_gradient(x)
    ################## layer 1, linear
    w = basic_DL_op.weight_variable('conv1', [3, 3, 1, features], 0.01)
    w_c = basic_DL_op.weight_variable('conv1_c', [3, 3, 1, features], 0.01)
    # First-layer mask EXCLUDES the centre tap (and everything after it in
    # raster order): the prediction for a sample must not see that sample.
    mask = [[1, 1, 1], [1, 0, 0], [0, 0, 0]]
    mask = tf.reshape(mask, shape=[3, 3, 1, 1])
    mask = tf.tile(mask, multiples=[1, 1, 1, features])
    mask = tf.cast(mask, dtype=tf.float32)
    w = w * mask
    b = basic_DL_op.bias_variable('bias1', [features])
    b_c = basic_DL_op.bias_variable('bias1_c', [features])
    x = basic_DL_op.conv2d(x, w) + b
    # Context branch uses an unmasked conv (context is fully available).
    context = basic_DL_op.conv2d(context, w_c) + b_c
    conv1 = x
    x = x + context
    ################## layers: resi_num resi_block
    # NOTE(review): nesting reconstructed from a flattened source — the
    # context fusion (x = x + context) is taken to be inside each stage;
    # confirm against the original layout.
    for i in range(resi_num):
        with tf.variable_scope('resi_block' + str(i)):
            x = mask_2D_resiBlock(x, features)
            context = resiBlock_2D_context(context, features)
            x = x + context
    # Long skip connection from the first masked conv.
    x = conv1 + x
    ################# conv: after skip connection, relu
    w = basic_DL_op.weight_variable('conv2', [3, 3, features, features], 0.01)
    # From here on the mask INCLUDES the centre tap: the centre now carries
    # only already-causal information from the first layer.
    mask = [[1, 1, 1], [1, 1, 0], [0, 0, 0]]
    mask = tf.reshape(mask, shape=[3, 3, 1, 1])
    mask = tf.tile(mask, multiples=[1, 1, features, features])
    mask = tf.cast(mask, dtype=tf.float32)
    w = w * mask
    b = basic_DL_op.bias_variable('bias2', [features])
    x = basic_DL_op.conv2d(x, w) + b
    x = tf.nn.relu(x)
    ################# convs: 1x1, relu/linear
    w = basic_DL_op.weight_variable('conv3', [1, 1, features, features], 0.01)
    b = basic_DL_op.bias_variable('bias3', [features])
    x = basic_DL_op.conv2d(x, w) + b
    x = tf.nn.relu(x)
    w = basic_DL_op.weight_variable('conv4', [1, 1, features, features], 0.01)
    b = basic_DL_op.bias_variable('bias4', [features])
    x = basic_DL_op.conv2d(x, w) + b
    x = tf.nn.relu(x)
    # Final linear 1x1 conv producing the distribution parameters.
    w = basic_DL_op.weight_variable('conv5', [1, 1, features, para_num], 0.01)
    b = basic_DL_op.bias_variable('bias5', [para_num])
    x = basic_DL_op.conv2d(x, w) + b
    ################# cal the cdf with the output params
    # Split the para_num channels into the cal_cdf parameter groups:
    # h (softplus => positive), b (unconstrained), a (tanh => (-1, 1)).
    # Exact semantics are defined by cal_cdf (not visible here).
    h = tf.nn.softplus(x[:, :, :, 0:33])
    b = x[:, :, :, 33:46]
    a = tf.tanh(x[:, :, :, 46:58])
    # Probability mass of each label's quantisation bin:
    # P = CDF(label + 0.5/QP) - CDF(label - 0.5/QP).
    lower = label - 0.5 / static_QP
    high = label + 0.5 / static_QP
    lower = cal_cdf(lower, h, b, a)
    high = cal_cdf(high, h, b, a)
    # Floor at 1e-9 to keep log() finite.
    prob = tf.maximum((high - lower), 1e-9)
    cross_entropy = -tf.reduce_mean(tf.log(prob))
    return cross_entropy