Example #1
    def __call__(self, input):
        with tf.variable_scope(self.name, reuse=self.reuse):
            input = ly.fc(input, 7 * 7 * 128, name='fc_0')
            input = ly.bn_layer(input, name='bn_0')
            input = tf.nn.leaky_relu(input)

            input = tf.reshape(input, (-1, 7, 7, 128))

            input = ly.deconv2d(input,
                                output_channel=64,
                                output_size=14,
                                strides=2,
                                name='deconv_0')
            input = ly.bn_layer(input, name='bn_1')
            input = tf.nn.leaky_relu(input)

            input = ly.deconv2d(input,
                                output_channel=1,
                                output_size=28,
                                strides=2,
                                name='deconv_1')
            input = ly.bn_layer(input, name='bn_2')
            input = tf.nn.sigmoid(input)

        return input  ## (-1,28,28,1)
Example #2
    def generator2(self, g1):
        with tf.variable_scope("generator2", reuse=tf.AUTO_REUSE):
            # 2nd generator
            x_2 = lay.deconv2d(g1, f=64, name='g-conv2d-0')
            x_2 = tf.nn.leaky_relu(x_2, alpha=0.1)

            x_2 = lay.deconv2d(x_2, f=32, name='g-conv2d-1')
            out = tf.nn.leaky_relu(x_2, alpha=0.1)

            return out
Example #3
 def generator(self, z, const_init=False, trainable=True):
     # (n, 256, 7, 7)
     h0 = layers.dense(z,
                       7 * 7 * 256,
                       name="g_fc1",
                       const_init=const_init,
                       trainable=trainable)
     h0 = layers.batchnorm(h0, axis=1, name="g_bn1", trainable=trainable)
     # h0 = layers.batchnorm(h0, axis=1, name="g_bn1")
     h0 = flow.nn.leaky_relu(h0, 0.3)
     h0 = flow.reshape(h0, (-1, 256, 7, 7))
     # (n, 128, 7, 7)
     h1 = layers.deconv2d(
         h0,
         128,
         5,
         strides=1,
         name="g_deconv1",
         const_init=const_init,
         trainable=trainable,
     )
     h1 = layers.batchnorm(h1, name="g_bn2", trainable=trainable)
     # h1 = layers.batchnorm(h1, name="g_bn2")
     h1 = flow.nn.leaky_relu(h1, 0.3)
     # (n, 64, 14, 14)
     h2 = layers.deconv2d(
         h1,
         64,
         5,
         strides=2,
         name="g_deconv2",
         const_init=const_init,
         trainable=trainable,
     )
     h2 = layers.batchnorm(h2, name="g_bn3", trainable=trainable)
     # h2 = layers.batchnorm(h2, name="g_bn3")
     h2 = flow.nn.leaky_relu(h2, 0.3)
     # (n, 1, 28, 28)
     out = layers.deconv2d(
         h2,
         1,
         5,
         strides=2,
         name="g_deconv3",
         const_init=const_init,
         trainable=trainable,
     )
     out = flow.math.tanh(out)
     return out
Example #4
def generator(z, valence, arousal, reuse_variables=False):
    """
    Creates generator network.
    
    @param z: tensor of size config.num_z_channels
    @param valence: tensor of size 1
    @param arousal: tensor of size 1
    
    @return: tensor of size 96x96x3
    """
    if reuse_variables:
        tf.get_variable_scope().reuse_variables()

    with tf.variable_scope("generator") as scope:

        # duplicate valence/arousal label and concatenate to z
        z = concat_label(z, valence, duplicate=num_z_channels)
        z = concat_label(z, arousal, duplicate=num_z_channels)

        # -- fc layer
        name = 'G_fc'
        current = dense(z, 1024 * 6 * 6, reuse=reuse_variables)
        # reshape
        current = tf.reshape(current, [-1, 6, 6, 1024])
        current = tf.nn.relu(current)

        # -- transposed convolutional layer 1-4
        for index, num_filters in enumerate([512, 256, 128, 64]):
            name = 'G_deconv' + str(index + 1)
            current = deconv2d(current,
                               num_filters,
                               name=name,
                               reuse=reuse_variables)
            current = tf.nn.relu(current)

        # -- transposed convolutional layer 5+6
        current = deconv2d(current,
                           32,
                           stride=1,
                           name='G_deconv5',
                           reuse=reuse_variables)
        current = tf.nn.relu(current)

        current = deconv2d(current,
                           3,
                           stride=1,
                           name='G_deconv6',
                           reuse=reuse_variables)
        return tf.nn.tanh(current)
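
A minimal call sketch for the generator above, assuming TensorFlow 1.x graph mode; the latent size 50 is an assumption, and the placeholder shapes follow the docstring.

# Hedged usage sketch (assumptions: TF 1.x; num_z_channels is defined in the enclosing module).
import tensorflow as tf

z = tf.placeholder(tf.float32, [None, 50], name='z')             # latent code
valence = tf.placeholder(tf.float32, [None, 1], name='valence')  # affect labels
arousal = tf.placeholder(tf.float32, [None, 1], name='arousal')

fake = generator(z, valence, arousal)                               # first call builds the variables
fake_reused = generator(z, valence, arousal, reuse_variables=True)  # later calls reuse them
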
Example #5
def generator(real_img, desired_au, reuse=False):
    '''
    :param:
        real_img: RGB face images, shape [batch, 128, 128, 3], value [-1,1].
        desired_au: AU value, shape [batch, 17], value [0,1].
    :return:
        fake_img: RGB generate face, shape [batch, 128, 128, 3], value [-1,1].
        fake_mask: face mask, shape [batch, 128, 128, 1], value [0,1].
    '''
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
            
        desired_au = tf.expand_dims(desired_au, axis=1, name='ExpandDims1')
        desired_au = tf.expand_dims(desired_au, axis=2, name='ExpandDims2')
        desired_au = tf.tile(desired_au, multiples=[1,128,128,1], name='Tile')
        x = tf.concat([real_img, desired_au], axis=3, name='Concat')
        
        x = conv2d(x, out_channels=64, kernel_size=7, strides=1, name='Conv1')
        x = instance_norm(x, name='InstNorm1')
        x = tf.nn.relu(x, name='ReLU1')

        x = conv2d(x, out_channels=128, kernel_size=4, strides=2, name='Conv2')
        x = instance_norm(x, name='InstNorm2')
        x = tf.nn.relu(x, name='ReLU2')

        x = conv2d(x, out_channels=256, kernel_size=4, strides=2, name='Conv3')
        x = instance_norm(x, name='InstNorm3')
        x = tf.nn.relu(x, name='ReLU3')

        for i in range(1, 7):
            x = res_block(x, out_channels=256, name='ResBlock'+str(i))

        x = deconv2d(x, out_channels=128, kernel_size=4, stride=2, name='Deconv1')
        x = instance_norm(x, name='InstNorm4')
        x = tf.nn.relu(x, name='ReLU4')

        x = deconv2d(x, out_channels=64, kernel_size=4, stride=2, name='Deconv2')
        x = instance_norm(x, name='InstNorm5')
        features = tf.nn.relu(x, name='ReLU5')

        x = conv2d(features, out_channels=3, kernel_size=7, strides=1, name='ConvImg')
        fake_img = tf.tanh(x, name='Tanh')

        x = conv2d(features, out_channels=1, kernel_size=7, strides=1, name='ConvMask')
        fake_mask = tf.sigmoid(x, name='Sigmoid')

        return fake_img, fake_mask
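
The snippet returns an image and an attention mask but does not show how they are combined; below is a hedged sketch of one possible blending scheme (an assumption, not taken from the code above), assuming TensorFlow 1.x.

# Hedged usage sketch (assumption: the mask blends generated pixels back into the input image).
import tensorflow as tf

real_img = tf.placeholder(tf.float32, [None, 128, 128, 3], name='real_img')  # values in [-1, 1]
desired_au = tf.placeholder(tf.float32, [None, 17], name='desired_au')       # values in [0, 1]

fake_img, fake_mask = generator(real_img, desired_au)
# One possible composition: keep the original image where the mask is close to 1.
blended = fake_mask * real_img + (1.0 - fake_mask) * fake_img
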
Example #6
    def generator(self, z, reuse=None):
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            #Input layer
            x_1 = tf.layers.dense(z, 4 * 4 * 64)

            # 1st generator
            x_1 = tf.reshape(x_1, shape=[-1, 4, 4, 64])

            x_1 = lay.deconv2d(x_1, f=64, k=4, name='g-conv2d-0')
            x_1 = tf.nn.leaky_relu(x_1, alpha=0.1)

            x_1 = lay.deconv2d(x_1, f=64, name='g-conv2d-1')
            x_1 = tf.nn.leaky_relu(x_1, alpha=0.1)

            g1 = tf.nn.tanh(x_1)

            # 2nd generator
            x_2 = lay.deconv2d(x_1, f=32, name='g-conv2d-2')
            x_2 = tf.nn.leaky_relu(x_2, alpha=0.1)

            x_2 = lay.deconv2d(x_2, f=32, name='g-conv2d-3')
            x_2 = tf.nn.leaky_relu(x_2, alpha=0.1)

            g2 = tf.nn.tanh(x_2)

            # 3rd generator
            x_3 = lay.deconv2d(x_2, f=16, name='g-conv2d-4')
            x_3 = tf.nn.leaky_relu(x_3, alpha=0.1)

            x_3 = lay.deconv2d(x_3, f=16, name='g-conv2d-5')
            x_3 = tf.nn.leaky_relu(x_3, alpha=0.1)

            g3 = tf.nn.tanh(x_3)

            # 4th generator
            x_4 = lay.deconv2d(x_3, f=8, name='g-conv2d-6')
            x_4 = tf.nn.leaky_relu(x_4, alpha=0.1)

            x_4 = lay.deconv2d(x_4, f=8, name='g-conv2d-7')
            x_4 = tf.nn.leaky_relu(x_4, alpha=0.1)

            #Output
            x_out = lay.deconv2d(x_4, f=3, name='g-conv2d-8')
            g4 = tf.nn.tanh(x_out)

            return [g1, g2, g3, g4]
Example #7
 def up_sample(self, inputs, numOut, pool_size=2, name='upsample'):
     with tf.name_scope(name):
         kernel = tf.Variable(
             tf.contrib.layers.xavier_initializer(uniform=False)([
                 pool_size, pool_size, numOut,
                 inputs.get_shape().as_list()[3]
             ]),
             name='weights')
         #wd = weight_variable_devonc([pool_size, pool_size, numOut// 2, numOut], stddev)
         #bd = bias_variable([features // 2])
         h_deconv = tf.nn.relu(deconv2d(inputs, kernel, pool_size))
     return h_deconv
Example #8
def build_generator_resnet_9blocks(inputgen, name="generator"):
    '''The shape of input should be equal to the shape of output.'''
    pad_input = fluid.layers.pad2d(inputgen, [3, 3, 3, 3], mode="reflect")
    o_c1 = conv2d(pad_input, 32, 7, 1, 0.02, name=name + "_c1")
    o_c2 = conv2d(o_c1, 64, 3, 2, 0.02, "SAME", name + "_c2")
    o_c3 = conv2d(o_c2, 128, 3, 2, 0.02, "SAME", name + "_c3")
    o_r1 = build_resnet_block(o_c3, 128, name + "_r1")
    o_r2 = build_resnet_block(o_r1, 128, name + "_r2")
    o_r3 = build_resnet_block(o_r2, 128, name + "_r3")
    o_r4 = build_resnet_block(o_r3, 128, name + "_r4")
    o_r5 = build_resnet_block(o_r4, 128, name + "_r5")
    o_r6 = build_resnet_block(o_r5, 128, name + "_r6")
    o_r7 = build_resnet_block(o_r6, 128, name + "_r7")
    o_r8 = build_resnet_block(o_r7, 128, name + "_r8")
    o_r9 = build_resnet_block(o_r8, 128, name + "_r9")
    o_c4 = deconv2d(o_r9, [128, 128], 64, 3, 2, 0.02, "SAME", name + "_c4")
    o_c5 = deconv2d(o_c4, [256, 256], 32, 3, 2, 0.02, "SAME", name + "_c5")
    o_c6 = conv2d(o_c5, 3, 7, 1, 0.02, "SAME", name + "_c6", relu=False)

    out_gen = fluid.layers.tanh(o_c6, name + "_t1")
    return out_gen
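
A minimal call sketch for the fluid generator above; the 3x256x256 NCHW input shape is an assumption inferred from the [256, 256] output size of the last deconv2d.

# Hedged usage sketch (assumptions: PaddlePaddle 1.x static graph; NCHW input of shape 3x256x256).
import paddle.fluid as fluid

input_a = fluid.layers.data(name='input_A', shape=[3, 256, 256], dtype='float32')
fake_b = build_generator_resnet_9blocks(input_a, name="generator_A2B")
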
Example #9
    def generator(self, z, reuse=None):
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            x = tf.layers.dense(z, units=self.fc_unit, name='g-fc-0')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.layers.dense(x,
                                units=8 * 8 * self.gf_dim * 2,
                                name='g-fc-1')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.reshape(x, shape=[-1, 8, 8, self.gf_dim * 2])

            x = lay.deconv2d(x, f=self.gf_dim, name='g-conv2d-0')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = lay.deconv2d(x, f=3, name='g-conv2d-1')
            x = tf.nn.tanh(x)

            return x
Example #10
    def __call__(self, input):
        height = input.get_shape().as_list()[1]

        with tf.variable_scope(self.name, reuse=self.reuse):
            input = ly.conv2d(input, 64, name='g_conv2d_0')
            input = ly.bn_layer(input, name='g_bn_0')
            input = tf.nn.relu(input)

            ### resnet
            for i in range(16):
                cell = ly.conv2d(input, 64, name='g_res_conv2d_%s' % i)
                cell = ly.bn_layer(cell, name='g_res_bn_%s' % i)
                cell = input + cell
                input = cell

            ### deconv2d
            input = ly.conv2d(input, 256, name='g_conv2d_1')
            input = ly.deconv2d(input, 128, height * 2, strides=2, name='g_deconv2d_0')
            input = tf.nn.relu(input)

            input = ly.conv2d(input, 128, name='g_conv2d_2')
            input = ly.deconv2d(input, 64, height * 4, strides=2, name='g_deconv2d_1')
            input = tf.nn.relu(input)

            # ### Upsampling
            # input = ly.conv2d(input,256,name = 'g_conv2d_1')
            #
            # input = ly.Upsampling(input,height * 2)
            # input = ly.conv2d(input, 128,name = 'g_conv2d_2')
            # input = ly.bn_layer(input,name = 'g_bn_2')
            #
            # input = ly.Upsampling(input,height * 4)
            # input = ly.conv2d(input, 64,name = 'g_conv2d_3')
            # input = ly.bn_layer(input,name = 'g_bn_3')

            input = ly.conv2d(input, 3, name='g_conv2d_last')
            input = tf.nn.tanh(input)

            return input
Example #11
    def u_net(self, x, layers=4, base_channel=64, train=True):
        ds_layers = {}
        ds_layer_shape = {}

        # down sample layers
        for layer in range(0, layers-1):
            f_channels = base_channel * (2**layer)
            layer_name = 'ds_{}'.format(layer)
            if layer == 0:
                x = conv2d(x, [3, 3, 3, f_channels], layer_name + '_1')
            else:
                x = conv2d(x, [3, 3, f_channels//2, f_channels], layer_name + '_1')

            x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_2')
            ds_layers[layer] = x
            ds_layer_shape[layer] = tf.shape(x)

            x = maxpooling(x)

        # bottom layer
        f_channels = base_channel * (2**(layers-1))
        x = conv2d(x, [3, 3, f_channels//2, f_channels], 'bottom_1')
        x = conv2d(x, [3, 3, f_channels, f_channels], 'bottom_2')

        # up sample layers
        for layer in range(layers-2, -1, -1):
            f_channels = base_channel * (2**layer)
            layer_name = 'up_{}'.format(layer)
            x = deconv2d(x, [3, 3, f_channels, 2*f_channels], ds_layer_shape[layer], layer_name + '_deconv2d')

            # concatenate the corresponding down-sample feature map with the up-sampled one
            x = concat(ds_layers[layer], x)

            x = conv2d(x, [3, 3, 2*f_channels, f_channels], layer_name + '_conv_1')
            x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_conv_2')
            #if train:
            #    x = tf.nn.dropout(x, self.dropout)

        # add 1x1 convolution layer to change channel to one
        x = conv2d(x, [1, 1, base_channel, 1], 'conv_1x1', activation='no')

        logits = tf.squeeze(x, axis=3)

        return logits
Example #12
def inference(inputs, num_classes=34, is_training=False):

    conv1_1 = layers.conv2d(inputs, 3, 64, name='conv1_1')
    conv1_2 = layers.conv2d(conv1_1, 3, 64, name='conv1_2')

    pool1 = layers.max_pool(conv1_2, name='pool1')

    conv2_1 = layers.conv2d(pool1, 3, 128, name='conv2_1')
    conv2_2 = layers.conv2d(conv2_1, 3, 128, name='conv2_2')

    pool2 = layers.max_pool(conv2_2, name='pool2')

    conv3_1 = layers.conv2d(pool2, 3, 256, name='conv3_1')
    conv3_2 = layers.conv2d(conv3_1, 3, 256, name='conv3_2')
    conv3_3 = layers.conv2d(conv3_2, 3, 256, name='conv3_3')

    pool3 = layers.max_pool(conv3_3, name='pool3')

    conv4_1 = layers.conv2d(pool3, 3, 512, name='conv4_1')
    conv4_2 = layers.conv2d(conv4_1, 3, 512, name='conv4_2')
    conv4_3 = layers.conv2d(conv4_2, 3, 512, name='conv4_3')

    pool4 = layers.max_pool(conv4_3, name='pool4')

    conv5_1 = layers.conv2d(pool4, 3, 512, name='conv5_1')
    conv5_2 = layers.conv2d(conv5_1, 3, 512, name='conv5_2')
    conv5_3 = layers.conv2d(conv5_2, 3, 512, name='conv5_3')

    pool5 = layers.max_pool(conv5_3, name='pool5')

    fc6 = layers.conv2d(pool5, 7, 4096, name='fc6')

    if is_training:
        fc6 = layers.dropout(fc6, keep_prob=0.5, name='drop6')

    fc7 = layers.conv2d(fc6, 1, 4096, name='fc7')

    if is_training:
        fc7 = layers.dropout(fc7, keep_prob=0.5, name='drop7')

    score_fr = layers.conv2d(fc7, 1, num_classes, name='score_fr')

    upscore2 = layers.deconv2d(score_fr,
                               4,
                               num_classes,
                               stride=2,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore2')

    score_pool4 = layers.conv2d(pool4,
                                1,
                                num_classes,
                                activation=None,
                                name='score_pool4')
    fuse_pool4 = tf.add(upscore2, score_pool4, name='fuse_pool4')
    upscore4 = layers.deconv2d(fuse_pool4,
                               4,
                               num_classes,
                               stride=2,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore4')

    score_pool3 = layers.conv2d(pool3,
                                1,
                                num_classes,
                                activation=None,
                                name='score_pool3')
    fuse_pool3 = tf.add(upscore4, score_pool3, name='fuse_pool3')
    upscore8 = layers.deconv2d(fuse_pool3,
                               16,
                               num_classes,
                               stride=8,
                               bias=False,
                               activation=None,
                               init='bilinear',
                               name='upscore8')

    return upscore8
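
A hedged call sketch for the FCN-style network above, assuming TensorFlow 1.x and that `layers` is the same local helper module used in the snippet; the 512x512 input size is an assumption.

# Hedged usage sketch (assumptions: TF 1.x; 512x512 RGB inputs).
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 512, 512, 3], name='images')
logits = inference(images, num_classes=34, is_training=True)
# The three transposed convolutions upsample 2x, 2x and 8x (32x in total),
# undoing the five 2x max-pool stages in FCN-8s fashion.
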
Example #13
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
        Creates a new convolutional unet for the given parametrization.
        :param x: input tensor, shape [?,nx,ny,channels]
        :param keep_prob: dropout probability tensor
        :param channels: number of channels in the input image
        :param n_class: number of output labels
        :param layers: number of layers in the net
        :param features_root: number of features in the first layer
        :param filter_size: size of the convolution filter
        :param pool_size: size of the max pooling operation
        :param summaries: Flag if summaries should be created
        """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 128
    size = in_size
    # Down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root  # output features number
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Set weights and biases for the two convolutions
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.leaky_relu(conv2)
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            # Do pooling and calculate image processing size
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]
    # Up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Up convolution and skip connection; weight shape [kernel_x, kernel_y, out_features, in_features]
            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.leaky_relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat
            # Set weights and biases for the two convolutions
            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.leaky_relu(conv2)
            up_h_convs[layer] = in_node
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
    # Output map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.leaky_relu(conv)
        up_h_convs["out"] = output_map

    # blur map
    with tf.name_scope("output_blur"):
        weight = weight_variable([1, 1, features_root, 1], stddev)
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_blur = tf.nn.leaky_relu(conv)
        up_h_convs["blur"] = output_blur

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)
    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, output_blur, variables, int(in_size - size)
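
A hedged construction sketch for this U-Net variant, assuming TensorFlow 1.x; the 128x128 single-channel input is an assumption suggested by in_size = 128.

# Hedged usage sketch (assumptions: TF 1.x; grayscale 128x128 tiles, two output classes).
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128, 128, 1], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

output_map, output_blur, variables, offset = create_conv_net(
    x, keep_prob, channels=1, n_class=2, layers=3, features_root=16)
# offset is the int(in_size - size) value tracked inside the function.
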
Example #14
def create_conv_net(x,
                    keep_prob,
                    channels_in,
                    channels_out,
                    n_class,
                    layers=2,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels_in]
    :param keep_prob: dropout probability tensor
    :param channels_in: number of channels in the input image
    :param channels_out: number of channels in the output image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels_in]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels_in, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:  # no pooling after the last down layer (bottom of the U)
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_h_convs[
        layers - 1]  # the last down layer, i.e. the bottom of the U

    # up layers
    for layer in range(layers - 2, -1, -1):  # start one level above the bottom of the U
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])  # weights and bias for the upsampling step
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)

        # skip connection: concatenate the matching down-layer features
        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class * channels_out],
                             stddev)
    bias = bias_variable([n_class * channels_out])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
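
A hedged construction sketch for the U-Net above, assuming TensorFlow 1.x; the 572x572 input is an assumption (the classic U-Net tile size), not something fixed by the code.

# Hedged usage sketch (assumptions: TF 1.x; single-channel 572x572 tiles).
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 572, 572, 1], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

output_map, variables, offset = create_conv_net(
    x, keep_prob, channels_in=1, channels_out=1, n_class=2, layers=2)
# offset approximates the border lost to the unpadded convolutions.
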
Example #15
def inference(inputs, num_classes=34, keep_prob=0.5, is_training=False):

    conv1_1 = layers.conv2d(inputs, ksize=3, depth=64, name='conv1_1')
    conv1_2 = layers.conv2d(conv1_1, ksize=3, depth=64, name='conv1_2')

    pool1 = layers.max_pool(conv1_2, ksize=3, stride=2, name='pool1')

    conv2_1 = layers.conv2d(pool1, ksize=3, depth=128, name='conv2_1')
    conv2_2 = layers.conv2d(conv2_1, ksize=3, depth=128, name='conv2_2')

    pool2 = layers.max_pool(conv2_2, ksize=3, stride=2, name='pool2')

    conv3_1 = layers.conv2d(pool2, ksize=3, depth=256, name='conv3_1')
    conv3_2 = layers.conv2d(conv3_1, ksize=3, depth=256, name='conv3_2')
    conv3_3 = layers.conv2d(conv3_2, ksize=3, depth=256, name='conv3_3')

    pool3 = layers.max_pool(conv3_3, ksize=3, stride=2, name='pool3')

    conv4_1 = layers.conv2d(pool3, ksize=3, depth=512, name='conv4_1')
    conv4_2 = layers.conv2d(conv4_1, ksize=3, depth=512, name='conv4_2')
    conv4_3 = layers.conv2d(conv4_2, ksize=3, depth=512, name='conv4_3')

    pool4 = layers.max_pool(conv4_3, ksize=3, stride=1, name='pool4')

    conv5_1 = layers.conv2d(pool4, ksize=3, depth=512, rate=2, name='conv5_1')
    conv5_2 = layers.conv2d(conv5_1,
                            ksize=3,
                            depth=512,
                            rate=2,
                            name='conv5_2')
    conv5_3 = layers.conv2d(conv5_2,
                            ksize=3,
                            depth=512,
                            rate=2,
                            name='conv5_3')

    pool5 = layers.max_pool(conv5_3, ksize=3, stride=1, name='pool5')

    # hole 6
    fc6_1 = layers.conv2d(pool5, ksize=3, depth=1024, rate=6, name='fc6_1')
    if is_training:
        fc6_1 = layers.dropout(fc6_1, keep_prob=keep_prob, name='drop6_1')

    fc7_1 = layers.conv2d(fc6_1, ksize=1, depth=1024, name='fc7_1')
    if is_training:
        fc7_1 = layers.dropout(fc7_1, keep_prob=keep_prob, name='drop7_1')

    fc8_1 = layers.conv2d(fc7_1,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_1')

    # hole 12
    fc6_2 = layers.conv2d(pool5, ksize=3, depth=1024, rate=12, name='fc6_2')
    if is_training:
        fc6_2 = layers.dropout(fc6_2, keep_prob=keep_prob, name='drop6_2')

    fc7_2 = layers.conv2d(fc6_2, ksize=1, depth=1024, name='fc7_2')
    if is_training:
        fc7_2 = layers.dropout(fc7_2, keep_prob=keep_prob, name='drop7_2')

    fc8_2 = layers.conv2d(fc7_2,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_2')

    # hole 18
    fc6_3 = layers.conv2d(pool5, ksize=3, depth=1024, rate=18, name='fc6_3')
    if is_training:
        fc6_3 = layers.dropout(fc6_3, keep_prob=keep_prob, name='drop6_3')

    fc7_3 = layers.conv2d(fc6_3, ksize=1, depth=1024, name='fc7_3')
    if is_training:
        fc7_3 = layers.dropout(fc7_3, keep_prob=keep_prob, name='drop7_3')

    fc8_3 = layers.conv2d(fc7_3,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_3')

    #hole 24
    fc6_4 = layers.conv2d(pool5, ksize=3, depth=1024, rate=24, name='fc6_4')
    if is_training:
        fc6_4 = layers.dropout(fc6_4, keep_prob=keep_prob, name='drop6_4')

    fc7_4 = layers.conv2d(fc6_4, ksize=1, depth=1024, name='fc7_4')
    if is_training:
        fc7_4 = layers.dropout(fc7_4, keep_prob=keep_prob, name='drop7_4')

    fc8_4 = layers.conv2d(fc7_4,
                          ksize=1,
                          depth=num_classes,
                          activation=None,
                          name='fc8_4')

    fuse = tf.add_n([fc8_1, fc8_2, fc8_3, fc8_4], name='add')

    logits = layers.deconv2d(fuse,
                             16,
                             num_classes,
                             stride=8,
                             bias=False,
                             activation=None,
                             init='bilinear',
                             name='logits')

    return logits
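
A hedged call sketch for the dilated-convolution network above, assuming TensorFlow 1.x and the same local `layers` helper module; the input size is an assumption.

# Hedged usage sketch (assumptions: TF 1.x; 512x512 RGB inputs).
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 512, 512, 3], name='images')
logits = inference(images, num_classes=34, keep_prob=0.5, is_training=True)
# The four dilated branches (rates 6, 12, 18, 24) are summed and upsampled 8x,
# matching the 8x downsampling from the three strided pooling stages.
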
Example #16
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """ Creates a new convolutional unet for the given parametrization.
    ### Params:
        * x - Tensor: shape [batch_size, height, width, channels]
        * keep_prob - float: dropout probability
        * channels - integer: number of channels in the input image
        * n_class - integer: number of output labels
        * layers - integer: number of layers in the net
        * features_root - integer: number of features in the first layer
        * filter_size - integer: size of the convolution filter
        * pool_size - integer: size of the max pooling operation
        * summaries - bool: Flag if summaries should be created
    """

    logger.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_convs = OrderedDict()
    up_convs = OrderedDict()

    # Record the size difference
    in_size = 1000
    size = in_size

    # Encode
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:
            pools[layer] = max_pool(dw_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_convs[layers - 1]

    # Decode
    for layer in range(layers - 2, -1, -1):
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class], stddev)
    bias = bias_variable([n_class])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_convs["out"] = output_map

    # Summary the results of convolution and pooling
    if summaries:
        with tf.name_scope("summary_conv"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('layer_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('layer_%02d_02' % i, get_image_summary(c2))

        with tf.name_scope("summary_max_pooling"):
            for k in pools.keys():
                tf.summary.image('pool_%02d' % k, get_image_summary(pools[k]))

        with tf.name_scope("summary_deconv"):
            for k in deconv.keys():
                tf.summary.image('deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

        with tf.name_scope("down_convolution"):
            for k in dw_convs.keys():
                tf.summary.histogram("layer_%02d" % k + '/activations',
                                     dw_convs[k])

        with tf.name_scope("up_convolution"):
            for k in up_convs.keys():
                tf.summary.histogram("layer_%s" % k + '/activations',
                                     up_convs[k])

    # Record all the variables which can be used in L2 regularization
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
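
A hedged construction sketch for this U-Net, assuming TensorFlow 1.x; the input shape and class count are assumptions.

# Hedged usage sketch (assumptions: TF 1.x; single-channel inputs, two output classes).
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 572, 572, 1], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

output_map, variables, offset = create_conv_net(
    x, keep_prob, channels=1, n_class=2, layers=3, features_root=16)
# variables collects every weight and bias, e.g. for an L2 regularization term.
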
Example #17
def create_UNet_edge(x,
                     keep_prob,
                     channels,
                     n_class,
                     layers=5,
                     features_root=32,
                     summaries=True,
                     training=True):
    # Inception-conv UNet with deep supervision
    logging.info("Layers {layers}, features {features}".format(
        layers=layers, features=features_root))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    fat_inputs = OrderedDict()
    fat_pools = OrderedDict()
    fat_dw_h_convs = OrderedDict()
    deconv = OrderedDict()
    up_h_convs = OrderedDict()

    # rmvd train
    # in_node, excitation = rmvd_layer(in_node, 8, 2, name="rmvd_training")

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            if layer == 0:
                conv = inception_conv(in_node, channels, features, keep_prob,
                                      training)
            else:
                conv = inception_conv(in_node, features // 2, features,
                                      keep_prob, training)

            #conv = cSE_layer(conv, features, ratio=8, name="down_conv_{}".format(layer))
            fat_dw_h_convs[layer] = conv

            if layer < layers - 1:
                fat_pools[layer] = max_pool(conv, 2)
                in_node = fat_pools[layer]

    in_node = fat_dw_h_convs[layers - 1]
    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            h_deconv = deconv2d(in_node, features, features // 2, training)
            h_deconv_concat = tf.concat([h_deconv, fat_dw_h_convs[layer]], 3)

            deconv[layer] = h_deconv_concat
            in_node = inception_conv(h_deconv_concat, features, features // 2,
                                     keep_prob, training)
            #in_node = cSE_layer(in_node, features//2, ratio=4, name="up_conv_{}".format(layer))
            up_h_convs[layer] = in_node

    stddev = np.sqrt(2 / (3**2 * features_root))
    w = weight_variable([3, 3, features_root, 2], stddev, name="w")
    b = bias_variable([2], name="b")
    up_h_convs["out"] = tf.nn.bias_add(
        tf.nn.conv2d(up_h_convs[0], w, strides=[1, 1, 1, 1], padding="SAME"),
        b)

    # RCF
    for layer in range(3, 0, -1):
        with tf.name_scope("output_{}".format(str(layer))):
            in_node = up_h_convs[layer]
            conv = conv2d_2(in_node, features_root * 2**layer, 2, keep_prob)
            deconv = deconv2d_2(conv, 2, 2, 2**layer, keep_prob)
            up_h_convs["out_{}".format(layer)] = deconv

    in_node = tf.concat([
        up_h_convs["out"], up_h_convs["out_1"], up_h_convs["out_2"],
        up_h_convs["out_3"]
    ], 3)
    w_out = weight_variable([1, 1, 8, 2], stddev, name="w_out")
    b_out = bias_variable([2], name="b_out")
    output_map = tf.nn.bias_add(
        tf.nn.conv2d(in_node, w_out, strides=[1, 1, 1, 1], padding="SAME"),
        b_out)

    if summaries:
        with tf.name_scope("summaries"):
            for k in fat_pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(fat_pools[k]))

            tf.summary.image('summary_out_0',
                             get_image_summary(up_h_convs["out"], 1))
            tf.summary.image('summary_out_1',
                             get_image_summary(up_h_convs["out_1"], 1))
            tf.summary.image('summary_out_2',
                             get_image_summary(up_h_convs["out_2"], 1))
            tf.summary.image('summary_out_3',
                             get_image_summary(up_h_convs["out_3"], 1))

            for k in fat_dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations',
                    fat_dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return [
        output_map, up_h_convs["out"], up_h_convs["out_1"],
        up_h_convs["out_2"], up_h_convs["out_3"]
    ], variables
Example #18
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=5,
                    features_root=32,
                    summaries=True,
                    training=True):
    # Conventional UNet
    logging.info("Layers {layers}, features {features}".format(
        layers=layers, features=features_root))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    # rmvd train
    in_node, excitation = rmvd_layer(in_node, 8, 2, name="rmvd_training")

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            if layer == 0:
                conv1 = conv2d(in_node, channels, features, keep_prob,
                               training)
            else:
                conv1 = conv2d(in_node, features // 2, features, keep_prob,
                               training)

            conv2 = conv2d(conv1, features, features, keep_prob, training)
            dw_h_convs[layer] = conv2

            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], 2)
                in_node = pools[layer]

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            h_deconv = deconv2d(in_node, features, features // 2, training)
            h_deconv_concat = tf.concat([dw_h_convs[layer], h_deconv], 3)
            deconv[layer] = h_deconv_concat

            conv1 = conv2d(h_deconv_concat, features, features // 2, keep_prob,
                           training)
            conv2 = conv2d(conv1, features // 2, features // 2, keep_prob,
                           training)
            in_node = conv2
            up_h_convs[layer] = in_node

    stddev = np.sqrt(2 / (3**2 * features_root))
    w = weight_variable([3, 3, features_root, 2], stddev, name="w")
    b = bias_variable([2], name="b")
    output_map = tf.nn.bias_add(
        tf.nn.conv2d(up_h_convs[0], w, strides=[1, 1, 1, 1], padding="SAME"),
        b)
    up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return [output_map], variables
Example #19
    def define_model(self, input: tf.Tensor) -> tf.Tensor:
        """Take input tensor, return logits tensor using Unet_Inj architecture.
        Args:
            input (tf.Tensor): input tensor. Shape (n, s, s, n_channel)
        Returns:
            tf.Tensor: logit tensor. Shape (n, s-off, s-off, n_class).
                where off represents a cutoff due to valid padding effects.
        """

        # Split into max projection and dapi images
        in_node, dapi = tf.split(input, 2, axis=3)

        # Encoding architecture parameters
        layers = self.net_kwargs.get("layers", 3)
        feat_factor = self.net_kwargs.get("feat_factor", 2)
        features_root = self.net_kwargs.get("features_root", 16)
        filter_size = self.net_kwargs.get("filter_size", 3)
        rate = self.net_kwargs.get("rate", 0.1)
        pool_size = self.net_kwargs.get("pool_size", 2)

        # Encoding process
        dw_h_convs = OrderedDict()
        for layer in range(0, layers):
            with tf.name_scope("down_conv_{}".format(str(layer))):
                features = int((feat_factor**layer) * features_root)
                conv1 = prelu(conv2d(in_node, features, filter_size))
                conv1 = dropout_bn(conv1, rate)
                conv2 = prelu(conv2d(conv1, features, filter_size))
                conv2 = dropout_bn(conv2, rate)
                dw_h_convs[layer] = conv2
                if layer < layers - 1:
                    in_node = max_pool(dw_h_convs[layer], pool_size)
        in_node = dw_h_convs[layers - 1]

        # Parameters for Dapi signal introduction
        dapiFeat = self.net_kwargs.get("dapiFeat", 8)
        dapi_position = self.net_kwargs.get("dapi_position", "last")
        joinType = self.net_kwargs.get("joinType", "concat")

        # Decoding process, including dapi signal addition
        for layer in range(layers - 2, -1, -1):
            with tf.name_scope("up_conv_{}".format(str(layer))):
                # Deconvolve and concatenate to stored layer
                features = int(feat_factor**(layer + 1) * features_root)
                deconv1 = prelu(deconv2d(in_node, features, pool_size))
                deconv1 = dropout_bn(deconv1, rate)
                f = int((feat_factor**layer) * features_root)
                deconv2 = crop_concat(dw_h_convs[layer], deconv1, f)

                conv1 = prelu(conv2d(deconv2, features // 2, filter_size))
                conv1 = dropout_bn(conv1, rate)
                if layer == 0 and dapi_position == "second":
                    dapi = dapi_process(dapi, dapiFeat, filter_size, rate)
                    conv1 = dapi_add(dapi, conv1, joinType, dapiFeat)

                conv2 = prelu(conv2d(conv1, features // 2, filter_size))
                conv2 = dropout_bn(conv2, rate)
                if layer == 0 and dapi_position == "last":
                    dapi = dapi_process(dapi, dapiFeat, filter_size, rate)
                    conv2 = dapi_add(dapi, conv2, joinType, dapiFeat)
                in_node = conv2

        # Output Map
        with tf.name_scope("output_map"):
            logits = conv2d(in_node, self.n_class, 1, dtype=tf.float32)
        return logits
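
A hedged call sketch for define_model, assuming TensorFlow 1.x and that `model` is an instance of the enclosing class; the two input channels (max projection and DAPI) follow the tf.split above.

# Hedged usage sketch (assumptions: TF 1.x; `model` is an instance of the enclosing U-Net class).
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 256, 256, 2], name='images')  # channel 0: max projection, channel 1: DAPI
logits = model.define_model(images)  # (n, 256 - off, 256 - off, n_class) under valid padding
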
    def __call__(self, input):
        ### input --> image
        ### input_shape : [ -1, 128,128,3 ]
        input_shape = input.get_shape().as_list()
        input_channel = input_shape[-1]
        with tf.variable_scope(self.name, reuse=self.reuse):
            height = input.get_shape().as_list()[1]

            ### Extract features; reflect padding avoids artifacts at the image border
            input = tf.pad(input,
                           paddings=[[0, 0], [3, 3], [3, 3], [0, 0]],
                           mode='REFLECT')
            input = ly.conv2d(input,
                              64,
                              kernel_size=7,
                              strides=1,
                              name='g_conv2d_0')
            input = ly.bn_layer(input, name='g_bn_0')
            input = tf.nn.relu(input)

            input = ly.conv2d(input,
                              128,
                              kernel_size=3,
                              strides=2,
                              name='g_conv2d_1')
            input = ly.bn_layer(input, name='g_bn_1')
            input = tf.nn.relu(input)

            input = ly.conv2d(input,
                              256,
                              kernel_size=3,
                              strides=2,
                              name='g_conv2d_2')
            input = ly.bn_layer(input, name='g_bn_2')
            input = tf.nn.relu(input)

            ### resnet
            for i in range(8):
                cell = ly.conv2d(input,
                                 256,
                                 kernel_size=3,
                                 strides=1,
                                 name='g_conv2d_res_%s' % i)
                cell = ly.bn_layer(cell, name='g_res_%s' % i)
                cell = tf.nn.relu(cell)
                input = cell

            low_height = math.ceil((height + 6.) / 4)

            input = ly.deconv2d(input,
                                128,
                                low_height * 2,
                                kernel_size=3,
                                strides=2,
                                name='g_deconv2d_0')
            input = ly.bn_layer(input, name='g_bn_3')
            input = tf.nn.relu(input)

            input = ly.deconv2d(input,
                                64,
                                low_height * 4,
                                kernel_size=3,
                                strides=2,
                                name='g_deconv2d_1')
            input = ly.bn_layer(input, name='g_bn_4')
            input = tf.nn.relu(input)

            input = ly.conv2d(input,
                              3,
                              kernel_size=7,
                              strides=1,
                              name='g_conv2d_3')
            input = ly.bn_layer(input, name='g_bn_5')
            input = tf.nn.tanh(input)

            input = tf.image.resize_images(input, [height, height])

        self.reuse = True

        return input
def create_network(x,
                   keep_prob,
                   padding=False,
                   resolution=3,
                   features_root=16,
                   channels=3,
                   filter_size=3,
                   deconv_size=2,
                   layers_per_transpose=2,
                   summaries=True):
    """
    :param x: input tensor, shape [?,width,height,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in input image
    :param padding: boolean, True if inputs are padded before convolution
    :param resolution: corresponds to how large the output should be relative to the input
    :param features_root: number of features in the first layer
    :param filter_size: size of convolution filter
    :param deconv_size: size of deconv strides
    :param layers_per_transpose: number of convolution layers before each upscaling step
    :param summaries: flag if summaries should be created
    """

    logging.info(
        "Resolution x{resolution}, features {features}, filter size {filter_size}x{filter_size}"
        .format(resolution=resolution,
                features=features_root,
                filter_size=filter_size))

    with tf.name_scope("preprocessing"):
        width = x.shape[1]  #tf.shape(x)[1]
        height = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, width, height, channels]))
        in_node = tf.zeros_like(x_image)  # dummy input until `size` reaches the real input width

    max_size = min(1024, width * 2**(resolution - 1))

    weights = []
    biases = []
    convs = []
    outputs = []
    convsDict = OrderedDict()
    deconvsDict = OrderedDict()

    size = 128
    which_conv = 0
    which_up_conv = 0
    stddev = 0
    out_features = features_root
    in_features = channels
    while size < 1024:
        if size == width:
            in_node = x_image
        with tf.name_scope("Conv{}".format(str(size) + str(which_conv))):
            for layer in range(0, layers_per_transpose):
                if layer == 0:
                    in_features = channels
                    out_features = features_root
                else:
                    in_features = out_features
                    out_features = int(out_features / 2)  # change if necessary
                stddev = np.sqrt(2 / (filter_size**2 * out_features))
                if size < width or size >= max_size:
                    trainable = False
                else:
                    trainable = True
                w = weight_variable(
                    [filter_size, filter_size, in_features, out_features],
                    stddev,
                    name="w" + str(size) + str(layer),
                    trainable=trainable)
                b = bias_variable([out_features],
                                  name="b" + str(size) + str(layer),
                                  trainable=trainable)
                if padding:
                    in_node = tf.pad(in_node,
                                     paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
                                     mode='SYMMETRIC')
                conv = conv2d(in_node, w, b, keep_prob)
                convsDict[which_conv] = tf.nn.relu(conv)
                in_node = convsDict[which_conv]
                if trainable:
                    weights.append(w)
                    biases.append(b)
                convs.append(conv)
                which_conv += 1
        # Upscalings...
        with tf.name_scope("Up_Conv{}".format(str(size) + str(which_up_conv))):
            stddev = np.sqrt(2 / (filter_size**2 * out_features))
            if layers_per_transpose == 0:
                in_features = channels
            else:
                in_features = out_features
            wd = weight_variable(
                [deconv_size, deconv_size, in_features, out_features],
                stddev,
                name="wd" + str(size) + str(layer),
                trainable=trainable)
            bd = bias_variable([out_features],
                               name="bd" + str(size) + str(layer),
                               trainable=trainable)
            deconv = tf.nn.relu(deconv2d(in_node, wd, deconv_size) + bd)
            deconvsDict[which_up_conv] = deconv
            if trainable:
                weights.append(wd)
                biases.append(bd)
            in_node = deconv
            which_up_conv += 1
            size *= 2
        # Outputs...
        with tf.name_scope("Output{}".format(str(size))):
            if size < width or size > max_size:
                trainable = False
            else:
                trainable = True
            weight = weight_variable([1, 1, out_features, channels],
                                     stddev,
                                     name="wOut" + str(size) + str(layer),
                                     trainable=trainable)
            bias = bias_variable([channels],
                                 name="bOut" + str(size) + str(layer),
                                 trainable=trainable)
            conv = conv2d(in_node, weight, bias, tf.constant(1.0))
            output = tf.nn.relu(conv)
            if trainable:
                weights.append(weight)
                biases.append(bias)
            outputs.append(output)
            in_node = output
            convsDict["Output_" + str(size)] = output

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c) in enumerate(convs):
                tf.summary.image('summary_conv_%02d' % i, get_image_summary(c))
            for i in (256, 512, 1024):
                tf.summary.image(
                    'summary_output_' + str(i),
                    get_image_summary(convsDict["Output_" + str(i)]))
            for k in deconvsDict.keys():
                tf.summary.image('summary_deconv_%02d' % k,
                                 get_image_summary(deconvsDict[k]))

    variables = list(weights) + list(biases)

    return outputs, variables
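A minimal sketch of how create_network might be driven, assuming the helper ops it relies on (weight_variable, bias_variable, conv2d, deconv2d, get_image_summary) and the usual numpy/logging/OrderedDict imports live in the same module; the placeholder shapes and the Saver usage below are illustrative only:

import tensorflow as tf

# Hypothetical driver: build the network on 128x128 RGB inputs.
x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3], name="x")
keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

outputs, variables = create_network(x,
                                    keep_prob,
                                    resolution=3,
                                    features_root=16,
                                    channels=3,
                                    summaries=False)

# outputs holds one upscaled image tensor per resolution step, while
# variables collects only the trainable weights/biases, e.g. for a Saver.
saver = tf.train.Saver(var_list=variables)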
Example #22
0
def generator(inputgen, name="generator"):
    with tf.variable_scope(name):
        f = 7
        ks = 3

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           "REFLECT")
        norm1, o_c1 = conv2d(pad_input,
                             ngf,
                             f,
                             f,
                             1,
                             1,
                             0.02,
                             name="c1",
                             relufactor=0.2)
        norm2, o_c2 = conv2d(o_c1,
                             ngf * 2,
                             ks,
                             ks,
                             2,
                             2,
                             0.02,
                             "SAME",
                             "c2",
                             relufactor=0.2)
        norm3, o_c3 = conv2d(o_c2,
                             ngf * 4,
                             ks,
                             ks,
                             2,
                             2,
                             0.02,
                             "SAME",
                             "c3",
                             relufactor=0.2)

        o_r1 = residual(o_c3, ngf * 4, "r1")
        o_r2 = residual(o_r1, ngf * 4, "r2")
        o_r3 = residual(o_r2, ngf * 4, "r3")
        o_r4 = residual(o_r3, ngf * 4, "r4")
        o_r5 = residual(o_r4, ngf * 4, "r5")
        o_r6 = residual(o_r5, ngf * 4, "r6")
        o_r7 = residual(o_r6, ngf * 4, "r7")
        o_r8 = residual(o_r7, ngf * 4, "r8")
        o_r9 = residual(o_r8, ngf * 4, "r9")

        norm4, _ = deconv2d(o_r9, ngf * 2, ks, ks, 2, 2, 0.02, "SAME", "c4")
        o_c4_c2 = tf.concat(axis=3, values=[norm4, norm2], name="o_c4_c2")
        _, o_c4 = conv2d(o_c4_c2, ngf * 2, ks, ks, 1, 1, 0.02, "SAME",
                         "o_c4_merge")

        norm5, _ = deconv2d(o_c4, ngf, ks, ks, 2, 2, 0.02, "SAME", "c5")
        o_c5_c1 = tf.concat(axis=3, values=[norm5, norm1], name="o_c5_c1")
        _, o_c5 = conv2d(o_c5_c1, ngf, ks, ks, 1, 1, 0.02, "SAME",
                         "o_c5_merge")

        norm6, _ = conv2d(o_c5, img_layer, f, f, 1, 1, 0.02, "SAME", "c6")
        o_c6_input = tf.concat(axis=3,
                               values=[norm6, inputgen],
                               name="o_c6_input")
        _, o_c6 = conv2d(o_c6_input,
                         img_layer,
                         f,
                         f,
                         1,
                         1,
                         0.02,
                         "SAME",
                         "o_c6_merge",
                         do_relu=False)

        return tf.nn.tanh(o_c6)
Example #23
0
    def __call__(self, input):  # 256 * 256 * 3
        with tf.variable_scope(self.name, reuse=self.reuse):

            e0 = ly.conv2d(input, 64, strides=2,
                           name='g_conv2d_0')  # 128 * 128 * 64
            e0 = ly.bn_layer(e0, name='g_bn_0')
            e0 = tf.nn.leaky_relu(e0)

            e1 = ly.conv2d(e0, 128, strides=2,
                           name='g_conv2d_1')  # 64 * 64 * 128
            e1 = ly.bn_layer(e1, name='g_bn_1')
            e1 = tf.nn.leaky_relu(e1)

            e2 = ly.conv2d(e1, 256, strides=2,
                           name='g_conv2d_2')  # 32 * 32 * 256
            e2 = ly.bn_layer(e2, name='g_bn_2')
            e2 = tf.nn.leaky_relu(e2)

            e3 = ly.conv2d(e2, 512, strides=2,
                           name='g_conv2d_3')  # 16 * 16 * 512
            e3 = ly.bn_layer(e3, name='g_bn_3')
            e3 = tf.nn.leaky_relu(e3)

            e4 = ly.conv2d(e3, 512, strides=2,
                           name='g_conv2d_4')  # 8 * 8 * 512
            e4 = ly.bn_layer(e4, name='g_bn_4')
            e4 = tf.nn.leaky_relu(e4)

            e5 = ly.conv2d(e4, 512, strides=2,
                           name='g_conv2d_5')  # 4 * 4 * 512
            e5 = ly.bn_layer(e5, name='g_bn_5')
            e5 = tf.nn.leaky_relu(e5)

            e6 = ly.conv2d(e5, 512, strides=2,
                           name='g_conv2d_6')  # 2 * 2 * 512
            e6 = ly.bn_layer(e6, name='g_bn_6')
            e6 = tf.nn.leaky_relu(e6)

            e7 = ly.conv2d(e6, 512, strides=2,
                           name='g_conv2d_7')  # 1 * 1 * 512
            e7 = ly.bn_layer(e7, name='g_bn_7')
            e7 = tf.nn.leaky_relu(e7)

            d1 = ly.deconv2d(e7, 512, 2, strides=2,
                             name='g_deconv2d_1')  # 2 * 2 * 512
            d1 = ly.bn_layer(d1, name='g_bn_8')
            d1 = tf.nn.dropout(tf.nn.leaky_relu(d1), 0.5)
            d1 = tf.concat([d1, e6], 3)

            d2 = ly.deconv2d(d1, 512, 4, strides=2,
                             name='g_deconv2d_2')  # 4 * 4 * 512
            d2 = ly.bn_layer(d2, name='g_bn_9')
            d2 = tf.nn.dropout(tf.nn.leaky_relu(d2), 0.5)
            d2 = tf.concat([d2, e5], 3)

            d3 = ly.deconv2d(d2, 512, 8, strides=2,
                             name='g_deconv2d_3')  # 8 * 8 * 512
            d3 = ly.bn_layer(d3, name='g_bn_10')
            d3 = tf.nn.dropout(tf.nn.leaky_relu(d3), 0.5)
            d3 = tf.concat([d3, e4], 3)

            d4 = ly.deconv2d(d3, 512, 16, strides=2,
                             name='g_deconv2d_4')  # 16 * 16 * 512
            d4 = ly.bn_layer(d4, name='g_bn_11')
            d4 = tf.nn.leaky_relu(d4)
            d4 = tf.concat([d4, e3], 3)

            d5 = ly.deconv2d(d4, 256, 32, strides=2,
                             name='g_deconv2d_5')  # 32 * 32 * 256
            d5 = ly.bn_layer(d5, name='g_bn_12')
            d5 = tf.nn.leaky_relu(d5)
            d5 = tf.concat([d5, e2], 3)

            d6 = ly.deconv2d(d5, 128, 64, strides=2,
                             name='g_deconv2d_6')  # 64 * 64 * 128
            d6 = ly.bn_layer(d6, name='g_bn_13')
            d6 = tf.nn.leaky_relu(d6)
            d6 = tf.concat([d6, e1], 3)

            d7 = ly.deconv2d(d6, 64, 128, strides=2,
                             name='g_deconv2d_7')  # 128 * 128 * 64
            d7 = ly.bn_layer(d7, name='g_bn_14')
            d7 = tf.nn.leaky_relu(d7)
            d7 = tf.concat([d7, e0], 3)

            d7 = ly.deconv2d(d7, 3, 256, strides=2,
                             name='g_deconv2d_8')  # 256 * 256 * 3
            d7 = tf.nn.tanh(d7)

        return d7
Example #24
0
def generator(inputgen, name="generator"):
    '''
    build the generator
    :param inputgen: input tensor
    :param name: operation name
    :return: tensor
    '''
    with tf.variable_scope(name):
        f = 7
        ks = 3

        H, W = inputgen.get_shape().as_list()[1:3]  # image height and width
        scale = 2  # image downsampling factor
        num_blocks = 2  # number of downsampled image blocks
        imgs = [inputgen]  # image blocks at the different scales
        conv_blocks = []  # convolution results for each image block

        for i in range(num_blocks):
            img_block = resize(images=inputgen,
                               size=[
                                   H // (scale * pow(2, i)),
                                   W // (scale * pow(2, i))
                               ])
            imgs.append(img_block)

        for i in range(len(imgs)):
            conv_block = conv2d(imgs[i],
                                ngf * pow(2, i),
                                f,
                                f,
                                1,
                                1,
                                "SAME",
                                "c" + str(i + 1) + "_1",
                                do_norm=False)
            conv_block = conv2d(conv_block, ngf * pow(2, i), ks, ks, 1, 1,
                                "SAME", "c" + str(i + 1) + "_2")
            conv_block = conv2d(conv_block, ngf * pow(2, i), ks, ks, 1, 1,
                                "SAME", "c" + str(i + 1) + "_3")
            conv_block = conv2d(conv_block, ngf * pow(2, i), ks, ks, 1, 1,
                                "SAME", "c" + str(i + 1) + "_4")
            conv_block = conv2d(conv_block, ngf * pow(2, i), ks, ks, 1, 1,
                                "SAME", "c" + str(i + 1) + "_5")
            conv_blocks.append(conv_block)

        deconv = deconv2d(conv_blocks[2], conv_blocks[1].get_shape()[-1], ks,
                          ks, 2, 2, "SAME", "dc4")
        tensor = tf.concat(values=[deconv, conv_blocks[1]], axis=3)

        deconv = deconv2d(tensor, conv_blocks[0].get_shape()[-1], ks, ks, 2, 2,
                          "SAME", "dc5")
        tensor = tf.concat(values=[deconv, conv_blocks[0]], axis=3)

        img_256_3 = conv2d(tensor, img_layer, ks, ks, 1, 1, "SAME", "dc6")
        tensor_256_6 = tf.concat(values=[img_256_3, inputgen], axis=3)
        img = conv2d(tensor_256_6,
                     img_layer,
                     ks,
                     ks,
                     1,
                     1,
                     "SAME",
                     "dc7",
                     do_relu=False)

        outputgen = tanh(img)

        return outputgen
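The example above builds an image pyramid (the full-resolution input plus progressively downsampled copies), runs a conv stack per level, and merges the levels back together with deconvs and concatenation. A self-contained sketch of just the pyramid construction, independent of the repository's conv2d/deconv2d wrappers (the function name and defaults below are placeholders):

import tensorflow as tf

def image_pyramid(images, scale=2, num_blocks=2):
    # Return [full_res, 1/scale, 1/(2*scale), ...] resized copies of images.
    h, w = images.get_shape().as_list()[1:3]
    pyramid = [images]
    for level in range(num_blocks):
        factor = scale * 2 ** level
        pyramid.append(tf.image.resize_images(images, [h // factor, w // factor]))
    return pyramid

# For a 256x256 input with the defaults, the pyramid holds tensors of spatial
# size 256, 128 and 64, matching the imgs list built in the generator above.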
Example #25
0
# First ConvGRU layer over the encoded sequence.
with tf.variable_scope('cell_1'):
    cell_1 = ConvGRUCell(shape, 3, kernel_1,
                         initializer=tf.truncated_normal_initializer(stddev=0.01))
    outputs_1, state_1 = tf.nn.dynamic_rnn(cell_1, outputs, dtype=inputs.dtype,
                                           time_major=True)

# Second pass through the same ConvGRU weights, seeded with the first state.
with tf.variable_scope('cell_1', reuse=True):
    cell_2 = ConvGRUCell(shape, 3, kernel_1)
    outputs_2, state_2 = tf.nn.dynamic_rnn(cell_2, outputs, initial_state=state_1,
                                           dtype=inputs.dtype, time_major=True)
print(outputs_2)
print(state_2)

# Upsample the first time step back towards image resolution, then record the
# shape of the next (2x larger) deconvolution output.
output_deconv = deconv2d(outputs_2[0], [32, 448, 448, 3])
shape = [output_deconv.shape[0], output_deconv.shape[1] * 2,
         output_deconv.shape[2] * 2, output_deconv.shape[3]]

Example #26
0
    def generator(self, name, input, is_training=True):
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            ### 3 -> 64 64
            e0 = ly.conv2d(input, 64, strides=2, name='g_conv2d_0')
            e0 = ly.batch_normal(e0, name='g_bn_0', is_training=is_training)
            e0 = ly.relu(e0, alpha=0.2)

            ### 64 -> 128 32
            e1 = ly.conv2d(e0, 128, strides=2, name='g_conv2d_1')
            e1 = ly.batch_normal(e1, name='g_bn_1', is_training=is_training)
            e1 = ly.relu(e1, alpha=0.2)

            ### 128 -> 256 16
            e2 = ly.conv2d(e1, 256, strides=2, name='g_conv2d_2')
            e2 = ly.batch_normal(e2, name='g_bn_2', is_training=is_training)
            e2 = ly.relu(e2, alpha=0.2)

            ### 256 -> 512 8
            e3 = ly.conv2d(e2, 512, strides=2, name='g_conv2d_3')
            e3 = ly.batch_normal(e3, name='g_bn_3', is_training=is_training)
            e3 = ly.relu(e3, alpha=0.2)

            ### 512 -> 512 4
            e4 = ly.conv2d(e3, 512, strides=2, name='g_conv2d_4')
            e4 = ly.batch_normal(e4, name='g_bn_4', is_training=is_training)
            e4 = ly.relu(e4, alpha=0.2)

            ### 512 -> 512 2
            e5 = ly.conv2d(e4, 512, strides=2, name='g_conv2d_5')
            e5 = ly.batch_normal(e5, name='g_bn_5', is_training=is_training)
            e5 = ly.relu(e5, alpha=0.2)

            ### 512 -> 512 4
            d1 = ly.deconv2d(e5, 512, strides=2, name='g_deconv2d_1')
            d1 = ly.batch_normal(d1, name='g_bn_6', is_training=is_training)
            d1 = tf.nn.dropout(d1, keep_prob=0.5)
            d1 = tf.concat([d1, e4], axis=3)
            d1 = ly.relu(d1, alpha=0.2)

            ### 512 -> 512 8
            d2 = ly.deconv2d(d1, 512, strides=2, name='g_deconv2d_2')
            d2 = ly.batch_normal(d2, name='g_bn_7', is_training=is_training)
            d2 = tf.nn.dropout(d2, keep_prob=0.5)
            d2 = ly.relu(d2, alpha=0.2)
            d2 = tf.concat([d2, e3], axis=3)

            ### 512 -> 256 16
            d3 = ly.deconv2d(d2, 256, strides=2, name='g_deconv2d_3')
            d3 = ly.batch_normal(d3, name='g_bn_8', is_training=is_training)
            d3 = ly.relu(d3, alpha=0.2)
            d3 = tf.concat([d3, e2], axis=3)

            ### 256 -> 128 32
            d4 = ly.deconv2d(d3, 128, strides=2, name='g_deconv2d_4')
            d4 = ly.batch_normal(d4, name='g_bn_9', is_training=is_training)
            d4 = ly.relu(d4, alpha=0.2)
            d4 = tf.concat([d4, e1], axis=3)

            ### 128 -> 64 64
            d5 = ly.deconv2d(d4, 64, strides=2, name='g_deconv2d_5')
            d5 = ly.batch_normal(d5, name='g_bn_10', is_training=is_training)
            d5 = ly.relu(d5, alpha=0.2)
            d5 = tf.concat([d5, e0], axis=3)

            ### 64 -> 3 128
            d6 = ly.deconv2d(d5, 3, strides=2, name='g_deconv2d_6')
            d6 = ly.batch_normal(d6, name='g_bn_11', is_training=is_training)
            d6 = ly.relu(d6, alpha=0.2)

            return tf.nn.tanh(d6)
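The encoder-decoder generators above (Examples #23 and #26) share the same pattern: strided convolutions down to a bottleneck, transposed convolutions back up, and skip connections that concatenate mirrored encoder/decoder feature maps. A minimal, self-contained sketch of that pattern using stock tf.layers ops instead of the repository's ly wrappers (the function and layer names below are placeholders):

import tensorflow as tf

def tiny_unet_generator(x, is_training=True, name="tiny_generator"):
    # Illustrative two-level encoder-decoder with one skip connection.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # Encoder: halve the spatial size twice.
        e0 = tf.layers.conv2d(x, 64, 4, strides=2, padding="same", name="enc0")
        e0 = tf.nn.leaky_relu(tf.layers.batch_normalization(e0, training=is_training), 0.2)
        e1 = tf.layers.conv2d(e0, 128, 4, strides=2, padding="same", name="enc1")
        e1 = tf.nn.leaky_relu(tf.layers.batch_normalization(e1, training=is_training), 0.2)
        # Decoder: upsample and concatenate the matching encoder feature map.
        d0 = tf.layers.conv2d_transpose(e1, 64, 4, strides=2, padding="same", name="dec0")
        d0 = tf.nn.relu(tf.layers.batch_normalization(d0, training=is_training))
        d0 = tf.concat([d0, e0], axis=3)  # skip connection
        d1 = tf.layers.conv2d_transpose(d0, 3, 4, strides=2, padding="same", name="dec1")
        return tf.nn.tanh(d1)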