Example #1
def out_block(input_anc,
              input_pos,
              channels,
              laxer_idx,
              stride_input=1,
              k_size=8,
              padding_type='VALID'):

    # Last conv layer: the same weights and bias are applied to both the
    # anchor and the positive branch (shared/siamese weights); each output
    # is then flattened
    weights = ops.weight([k_size, k_size, k_size, channels[0], channels[1]],
                         layer_name='wcnn' + str(laxer_idx + 1))

    bias = ops.bias([1, 1, 1, channels[1]],
                    layer_name='bcnn' + str(laxer_idx + 1))

    conv_output_anc = tf.add(
        ops.conv3d(input_anc,
                   weights,
                   stride=[stride_input, stride_input, stride_input],
                   padding=padding_type), bias)
    conv_output_pos = tf.add(
        ops.conv3d(input_pos,
                   weights,
                   stride=[stride_input, stride_input, stride_input],
                   padding=padding_type), bias)

    conv_output_anc = ops.batch_norm(conv_output_anc)
    conv_output_pos = ops.batch_norm(conv_output_pos)

    conv_output_anc = tf.contrib.layers.flatten(conv_output_anc)
    conv_output_pos = tf.contrib.layers.flatten(conv_output_pos)

    return conv_output_anc, conv_output_pos
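The ops helpers above are project-specific. As a self-contained sketch of the same pattern in plain TF 1.x — one shared kernel/bias pair applied to both the anchor and the positive branch, then each output flattened — with hypothetical names:

import tensorflow as tf  # TF 1.x API

def shared_conv3d_block(x_a, x_b, in_ch, out_ch, k=8, stride=1):
    # One kernel and bias shared by both branches (siamese weight sharing).
    w = tf.get_variable('w', [k, k, k, in_ch, out_ch],
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable('b', [out_ch], initializer=tf.zeros_initializer())
    s = [1, stride, stride, stride, 1]
    y_a = tf.nn.conv3d(x_a, w, strides=s, padding='VALID') + b
    y_b = tf.nn.conv3d(x_b, w, strides=s, padding='VALID') + b
    # Flatten each branch to a per-example descriptor vector.
    return (tf.reshape(y_a, [tf.shape(y_a)[0], -1]),
            tf.reshape(y_b, [tf.shape(y_b)[0], -1]))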
Example #2
File: models.py  Project: McMvMc/lsm_mike
def grid_unet64(net, cost_vol):
    n, h, w, d, ch = cost_vol.get_shape().as_list()
    net.grid_net = {}
    with tf.variable_scope('Grid_Unet'):
        conv1 = conv3d('conv1',
                       cost_vol,
                       4,
                       32,
                       activation=None,
                       norm=net.norm,
                       mode=net.mode)
        net.grid_net['conv1'] = conv1
        conv2 = conv3d('conv2', conv1, 4, 64, norm=net.norm, mode=net.mode)
        net.grid_net['conv2'] = conv2
        conv3 = conv3d('conv3', conv2, 4, 128, norm=net.norm, mode=net.mode)
        net.grid_net['conv3'] = conv3
        conv4 = conv3d('conv4', conv3, 4, 256, norm=net.norm, mode=net.mode)
        net.grid_net['conv4'] = conv4
        deconv1 = deconv3d('deconv1',
                           conv4,
                           4,
                           128,
                           norm=net.norm,
                           mode=net.mode)
        net.grid_net['deconv1'] = deconv1
        deconv1 = tf.concat([deconv1, conv3], axis=4)
        deconv2 = deconv3d('deconv2',
                           deconv1,
                           4,
                           64,
                           norm=net.norm,
                           mode=net.mode)
        net.grid_net['deconv2'] = deconv2
        deconv2 = tf.concat([deconv2, conv2], axis=4)
        deconv3 = deconv3d('deconv3',
                           deconv2,
                           4,
                           32,
                           norm=net.norm,
                           mode=net.mode)
        net.grid_net['deconv3'] = deconv3
        deconv3 = tf.concat([deconv3, conv1], axis=4)
        deconv4 = deconv3d('deconv4',
                           deconv3,
                           4,
                           32,
                           norm=net.norm,
                           mode=net.mode)
        net.grid_net['deconv4'] = deconv4
        final_vol = deconv3d('out',
                             deconv4,
                             4,
                             1,
                             stride=1,
                             norm=None,
                             mode=net.mode)
        net.grid_net['out'] = final_vol

    return final_vol
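For orientation, an inferred shape trace through grid_unet64, assuming the conv3d/deconv3d helpers default to stride 2 (their definitions are not shown here) and a 64^3 cost volume, as the function name suggests; the numbers below are deduced, not taken from the source:

# Assumed NDHWC shapes (input cost_vol: N x 64 x 64 x 64 x ch):
#   conv1   -> (N, 32, 32, 32,  32)
#   conv2   -> (N, 16, 16, 16,  64)
#   conv3   -> (N,  8,  8,  8, 128)
#   conv4   -> (N,  4,  4,  4, 256)
#   deconv1 -> (N,  8,  8,  8, 128); concat with conv3 on axis 4 -> 256 ch
#   deconv2 -> (N, 16, 16, 16,  64); concat with conv2 on axis 4 -> 128 ch
#   deconv3 -> (N, 32, 32, 32,  32); concat with conv1 on axis 4 ->  64 ch
#   deconv4 -> (N, 64, 64, 64,  32)
#   out     -> (N, 64, 64, 64,   1)   # stride=1, no normalization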
Example #3
    def encoder(self, inputs, phase_train=True, reuse=False):
        with tf.variable_scope("encoder") as scope:
            if reuse:
                scope.reuse_variables()

            d_1 = conv3d(inputs,
                         shape=[4, 4, 4, 1, self.ef_dim],
                         strides=[1, 2, 2, 2, 1],
                         scope='conv_1')
            d_1 = lrelu(d_1)

            d_2 = conv3d(d_1,
                         shape=[4, 4, 4, self.ef_dim, self.ef_dim * 2],
                         strides=[1, 2, 2, 2, 1],
                         scope='conv_2')
            d_2 = lrelu(d_2)

            d_3 = conv3d(d_2,
                         shape=[4, 4, 4, self.ef_dim * 2, self.ef_dim * 4],
                         strides=[1, 2, 2, 2, 1],
                         scope='conv_3')
            d_3 = lrelu(d_3)

            d_4 = conv3d(d_3,
                         shape=[4, 4, 4, self.ef_dim * 4, self.ef_dim * 8],
                         strides=[1, 2, 2, 2, 1],
                         scope='conv_4')
            d_4 = lrelu(d_4)

            d_5 = conv3d(d_4,
                         shape=[4, 4, 4, self.ef_dim * 8, self.ef_dim * 8],
                         strides=[1, 1, 1, 1, 1],
                         scope='conv_5',
                         padding="VALID")
            d_5 = tf.reshape(d_5, [-1, self.ef_dim * 8])
            d_5 = tf.nn.sigmoid(d_5)

            l1 = linear(d_5, self.ef_dim * 16, scope='linear_1')
            l1 = lrelu(l1)

            l2 = linear(l1, self.ef_dim * 32, scope='linear_2')
            l2 = lrelu(l2)

            l3 = linear(l2, self.ef_dim * 64, scope='linear_3')
            l3 = lrelu(l3)

            l4_m = linear(l3, self.p_dim * 3, scope='linear_4m')
            l4_b = linear(l3, self.p_dim, scope='linear_4b')

            l4_m = tf.reshape(l4_m, [-1, 3, self.p_dim])
            l4_b = tf.reshape(l4_b, [-1, 1, self.p_dim])

            return l4_m, l4_b, d_5
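The stride pattern implies a 64^3 input: four stride-2 SAME convolutions reduce it to 4^3, and conv_5 (a 4x4x4 VALID kernel at stride 1) collapses that to 1^3, which is why d_5 can be reshaped directly to [-1, self.ef_dim * 8]. A quick check of that arithmetic (the 64^3 input resolution is an assumption, not stated in the snippet):

def same_out(size, stride):
    # SAME padding: ceil(size / stride)
    return -(-size // stride)

def valid_out(size, k, stride=1):
    # VALID padding: floor((size - k) / stride) + 1
    return (size - k) // stride + 1

s = 64                       # assumed input resolution per axis
for _ in range(4):           # conv_1 .. conv_4: stride 2, SAME
    s = same_out(s, 2)
assert s == 4
assert valid_out(s, 4) == 1  # conv_5: 4x4x4 VALID kernel -> 1x1x1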
Example #4
def conv_block(input,
               channels,
               dropout_flag,
               dropout_rate,
               laxer_idx,
               stride_input=1,
               k_size=3,
               padding_type='SAME'):
    # Traditional 3D conv layer followed by batch norm and relu activation

    i_size = input.get_shape().as_list()[-2] // stride_input  # integer division: '/' would yield a float, which is invalid as a shape dimension

    weights = ops.weight([k_size, k_size, k_size, channels[0], channels[1]],
                         layer_name='wcnn' + str(laxer_idx + 1),
                         reuse=tf.get_variable_scope().reuse)

    bias = ops.bias([i_size, i_size, i_size, channels[1]],
                    layer_name='bcnn' + str(laxer_idx + 1),
                    reuse=tf.get_variable_scope().reuse)

    conv_output = tf.add(
        ops.conv3d(input,
                   weights,
                   stride=[stride_input, stride_input, stride_input],
                   padding=padding_type), bias)
    conv_output = ops.batch_norm(conv_output)
    conv_output = ops.relu(conv_output)

    if dropout_flag:
        # Note: dropout_rate is passed as the *keep* probability here.
        conv_output = Dropout(conv_output, keep_prob=dropout_rate)

    return conv_output
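Note the bias shape: unlike the usual per-channel bias broadcast over space (Example #1 uses [1, 1, 1, channels[1]]), this block learns a separate bias for every voxel, which only works when the input resolution is fixed. A minimal sketch of the difference, with hypothetical shapes:

import tensorflow as tf

out_ch, i_size = 32, 16
conv = tf.zeros([8, i_size, i_size, i_size, out_ch])    # (N, D, H, W, C)

per_channel = tf.zeros([1, 1, 1, out_ch])               # broadcast over D, H, W
per_voxel = tf.zeros([i_size, i_size, i_size, out_ch])  # one bias per position

y1 = conv + per_channel  # same bias at every spatial location
y2 = conv + per_voxel    # position-dependent bias, ties the net to i_size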
Example #5
    def discriminator(self, image, train, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        reshaped_img = tf.reshape(image, [
            self.batch_size, self.image_size[0], self.image_size[1],
            self.image_size[1], 1
        ])
        h0 = ops.conv3d(reshaped_img, self.d_size, name='d_h0_conv')
        h0 = ops.lrelu(self.d_bn0(h0, train))
        h1 = ops.conv3d(h0, self.d_size * 2, name='d_h1_conv')
        h1 = ops.lrelu(self.d_bn1(h1, train))
        h2 = ops.conv3d(h1, self.d_size * 4, name='d_h2_conv')
        h2 = ops.lrelu(self.d_bn2(h2, train))
        h3 = ops.conv3d(h2, self.d_size * 8, name='d_h3_conv')
        h3 = ops.lrelu(self.d_bn3(h3, train))
        h3 = tf.reshape(h3, [self.batch_size, -1])
        h4 = ops.linear(h3, h3.get_shape()[1], 1, scope='d_h5_lin')

        return tf.nn.sigmoid(h4), h4, h2
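Returning both tf.nn.sigmoid(h4) and the raw logits h4 is the usual GAN idiom: the loss is computed from the logits for numerical stability, while the sigmoid output is kept for inspection. A sketch of how the logits are typically consumed (not the project's actual loss code; the placeholders stand for h4 evaluated on real and generated batches):

import tensorflow as tf

logits_real = tf.placeholder(tf.float32, [None, 1])  # h4 on real samples
logits_fake = tf.placeholder(tf.float32, [None, 1])  # h4 on generated samples

d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(logits_real), logits=logits_real))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(logits_fake), logits=logits_fake))
d_loss = d_loss_real + d_loss_fake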
Example #6
def dis_net(data_array, y, weights, biases, reuse=False):
    # MNIST data shape is (28, 28, 1)
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    data_array = conv_cond_concat(data_array, yb)

    print("-------------")
    print(data_array.get_shape())
    if channel == 1:
        conv1 = conv2d(data_array, weights['wc1'], biases['bc1'])
    else:
        conv1 = conv3d(data_array, weights['wc1_1'], biases['bc1'])
    conv1 = lrelu(conv1)
    conv1 = conv_cond_concat(conv1, yb)

    if channel == 1:
        conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    else:
        conv2 = conv3d(conv1, weights['wc2_1'], biases['bc2'])
    conv2 = batch_normal(conv2, scope="dis_bn1", reuse=reuse)
    conv2 = lrelu(conv2)
    conv2 = tf.reshape(conv2, [batch_size, -1])
    conv2 = tf.concat([conv2, y], 1)

    # Stash tensors in collections for visualization
    tf.add_to_collection('weight_1', weights['wc1'])
    tf.add_to_collection('ac_1', conv1)
    tf.add_to_collection('weight_2', weights['wc2'])
    tf.add_to_collection('ac_2', conv2)

    f1 = fully_connect(conv2, weights['wc3'], biases['bc3'])
    f1 = batch_normal(f1, scope="dis_bn2", reuse=reuse)
    f1 = lrelu(f1)
    f1 = tf.concat([f1, y], 1)

    out = fully_connect(f1, weights['wd'], biases['bd'])

    return tf.nn.sigmoid(out), out
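conv_cond_concat is not defined in this snippet; in DCGAN-style code it is commonly implemented by tiling the (batch, 1, 1, y_dim) label block over the spatial dimensions and appending it as extra channels, roughly as below (4-D NHWC case, assuming static shapes; a volumetric variant tiles one more axis and concatenates on axis 4):

import tensorflow as tf

def conv_cond_concat(x, y):
    # x: (N, H, W, C) feature map; y: (N, 1, 1, y_dim) label block.
    x_shape = x.get_shape().as_list()
    y_shape = y.get_shape().as_list()
    # Broadcasting y against an all-ones tensor tiles it over H and W.
    tiled = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    return tf.concat([x, tiled], axis=3)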