Example #1
def class_subnet(inputs, is_training):
    with tf.variable_scope("class_subnet", reuse=tf.AUTO_REUSE):
        for i in range(D_class):
            inputs = swish(
                batchnorm(
                    "bn1" + str(i),
                    conv("conv1" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn2" + str(i),
                    conv("conv2" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn3" + str(i),
                    conv("conv3" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn4" + str(i),
                    conv("conv4" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
        inputs = conv("conv5", inputs, K * A, 3, 1, "SAME", True)
        H, W = tf.shape(inputs)[1], tf.shape(inputs)[2]
        inputs = tf.reshape(inputs, [-1, H * W * A, K])
    return inputs
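A minimal usage sketch for the subnet above (assumes the module-level constants D_class, W_bifpn, K and A and the conv/batchnorm/swish helpers used inside it; the feature-map shape is hypothetical, TF 1.x graph mode):

import tensorflow as tf

P3 = tf.placeholder(tf.float32, [None, 64, 64, 64])   # hypothetical BiFPN feature level
is_training = tf.placeholder(tf.bool)
P3_class_logits = class_subnet(P3, is_training)        # [batch, H*W*A, K]; AUTO_REUSE shares the head across levels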
Example #2
def g_net(img, scope, gf_dim=64, is_training=True, reuse=False):
    global bn
    bn = functools.partial(bn, is_training=is_training)
    def res_block(x, dim, scope='res'):
        y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = relu(bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                        scope=scope + '_conv1'), scope=scope + '_bn1'))
        y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                    scope=scope + '_conv2'), scope=scope + '_bn2')
        return y + x

    with tf.variable_scope(scope + '_g', reuse=reuse):
        c0 = tf.pad(img, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        c1 = relu(bn(conv(c0, gf_dim, 7, 1, padding='VALID', scope='c1_conv'), scope='c1_bn'))
        c2 = relu(bn(conv(c1, gf_dim * 2, 3, 2, scope='c2_conv'), scope='c2_bn'))
        c3 = relu(bn(conv(c2, gf_dim * 4, 3, 2, scope='c3_conv'), scope='c3_bn'))

        r1 = res_block(c3, gf_dim * 4, scope='r1')
        r2 = res_block(r1, gf_dim * 4, scope='r2')
        r3 = res_block(r2, gf_dim * 4, scope='r3')
        r4 = res_block(r3, gf_dim * 4, scope='r4')
        r5 = res_block(r4, gf_dim * 4, scope='r5')
        r6 = res_block(r5, gf_dim * 4, scope='r6')
        r7 = res_block(r6, gf_dim * 4, scope='r7')
        r8 = res_block(r7, gf_dim * 4, scope='r8')
        r9 = res_block(r8, gf_dim * 4, scope='r9')

        d1 = relu(bn(deconv(r9, gf_dim * 2, 3, 2, scope='d1_dconv'), scope='d1_bn'))
        d2 = relu(bn(deconv(d1, gf_dim, 3, 2, scope='d2_dconv'), scope='d2_bn'))
        d2 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        pred = conv(d2, 3, 7, 1, padding='VALID', scope='pred_conv')
        pred = tf.nn.tanh(pred)

    return pred
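A hedged call sketch for this generator (assumes the module's own conv/deconv/bn/relu helpers and TF 1.x graph mode; the image resolution is an assumption):

import tensorflow as tf

img_a = tf.placeholder(tf.float32, [None, 256, 256, 3])   # hypothetical input batch
fake_b = g_net(img_a, scope='a2b', gf_dim=64, is_training=True, reuse=False)  # tanh output in [-1, 1]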
Example #3
    def pixel_discriminator(self,
                            input,
                            in_channels=3,
                            ndf=64,
                            norm_type='instance',
                            init_type='normal',
                            init_gain=1.0,
                            is_training=True,
                            sigmoid=None):
        """
            1x1 PatchGAN Discriminator (pixelGAN)
        """
        # 1x1 convolution with 64 filters and no normalization
        layer = ops.conv(input, in_channels=in_channels, out_channels=ndf, filter_size=1,
                         stride=1, weight_init_type=init_type, weight_init_gain=init_gain, norm_type=None,
                         activation_type='LeakyReLU', is_training=is_training, scope='layer0', reuse=self.reuse)

        # 1x1 convolution with 128 filters and instance normalization
        layer = ops.conv(layer, in_channels=ndf, out_channels=2*ndf, filter_size=1,
                         stride=1, weight_init_type=init_type, weight_init_gain=init_gain,
                         norm_type=norm_type, activation_type='LeakyReLU', is_training=is_training,
                         scope='layer1', reuse=self.reuse)

        # produces a single channel prediction map
        layer = ops.conv(layer, in_channels=2*ndf, out_channels=1, filter_size=1, stride=1,
                         weight_init_type=init_type, weight_init_gain=init_gain, norm_type=None,
                         activation_type=sigmoid, is_training=is_training, scope='layer2', reuse=self.reuse)

        return layer
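Inside the owning model class (self.reuse is assumed to be managed by the caller, and ops.conv to have the signature used above), a call might look like this sketch:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 256, 256, 3])   # hypothetical input batch
pixel_logits = self.pixel_discriminator(images, in_channels=3, ndf=64,
                                         norm_type='instance', is_training=True)
# the output keeps the input's spatial size: one real/fake score per pixel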
Example #4
 def res_block(x, dim, scope='res'):
     y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
     y = relu(bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                     scope=scope + '_conv1'), scope=scope + '_bn1'))
     y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
     y = bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                 scope=scope + '_conv2'), scope=scope + '_bn2')
     return y + x
Example #5
 def _residule_block(x, dim, name):
     with tf.compat.v1.variable_scope(name):
         y = conv(x, dim, 3, 1, "conv1")
         y = lrelu(y)
         y = pixel_norm(y)
         y = conv(y, dim, 3, 1, "conv2")
         y = pixel_norm(y)
         return y + x
Example #6
def test_gated_horiz_mask_layer():
    print("===========================================")
    print("Gated Conv Layers: 1 x 3 gated horiz conv mask B (Multiple layers)")
    print("Grayscale [16 in, 16 out]")
    input_shape = [5, 5]
    kernel_shape = [1, 3]
    mask_type = "B"
    num_inputs, num_outputs, data_num_channels = 2, 2, 1

    expected_0 = np.array([[2., 4., 4., 4., 4.], [2., 4., 4., 4., 4.],
                           [2., 4., 4., 4., 4.], [2., 4., 4., 4., 4.],
                           [2., 4., 4., 4., 4.]])

    expected = expected_from_list([expected_0, expected_0])

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             mask_type,
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x3_gated_horiz_conv_mask_B_Grayscale"))
    #print(matrix_to_string(outputs[0,:,:,0]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
    print("-------------------------------------------")
    print("Color [3 in, 48 out]")
    num_inputs, num_outputs, data_num_channels = 3, 6, 3

    expected_0 = np.array([[1., 4., 4., 4., 4.], [1., 4., 4., 4., 4.],
                           [1., 4., 4., 4., 4.], [1., 4., 4., 4., 4.],
                           [1., 4., 4., 4., 4.]])

    expected_1 = np.array([[2., 5., 5., 5., 5.], [2., 5., 5., 5., 5.],
                           [2., 5., 5., 5., 5.], [2., 5., 5., 5., 5.],
                           [2., 5., 5., 5., 5.]])

    expected_2 = np.array([[3., 6., 6., 6., 6.], [3., 6., 6., 6., 6.],
                           [3., 6., 6., 6., 6.], [3., 6., 6., 6., 6.],
                           [3., 6., 6., 6., 6.]])

    expected = expected_from_list([expected_0, expected_1, expected_2] * 2)

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             mask_type,
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x3_gated_horiz_conv_mask_B_Color"))

    #print(matrix_to_string(outputs[0,:,:,2]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
Example #7
def test_last_layers():
    print("===========================================")
    print("Last Layers: 1 x 1 conv mask B (2 layers)")
    print("Grayscale - (No masking required) [32 in, 32 out]")
    input_shape = [5, 5]
    kernel_shape = [1, 1]
    mask_type = "B"
    num_inputs, num_outputs, data_num_channels = 2, 2, 1

    expected_0 = np.array([[2., 2., 2., 2., 2.], [2., 2., 2., 2., 2.],
                           [2., 2., 2., 2., 2.], [2., 2., 2., 2., 2.],
                           [2., 2., 2., 2., 2.]])

    expected = expected_from_list([expected_0, expected_0])

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             mask_type,
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x1_conv_mask_B_Grayscale"))
    #print(matrix_to_string(outputs[0,:,:,0]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
    print("-------------------------------------------")
    print("Color [3 in, 48 out]")
    num_inputs, num_outputs, data_num_channels = 3, 6, 3

    expected_0 = np.array([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.],
                           [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.],
                           [1., 1., 1., 1., 1.]])

    expected_1 = np.array([[2., 2., 2., 2., 2.], [2., 2., 2., 2., 2.],
                           [2., 2., 2., 2., 2.], [2., 2., 2., 2., 2.],
                           [2., 2., 2., 2., 2.]])

    expected_2 = np.array([[3., 3., 3., 3., 3.], [3., 3., 3., 3., 3.],
                           [3., 3., 3., 3., 3.], [3., 3., 3., 3., 3.],
                           [3., 3., 3., 3., 3.]])

    expected = expected_from_list([expected_0, expected_1, expected_2] * 2)

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             mask_type,
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x1_conv_mask_B_Color"))
    #print(matrix_to_string(outputs[0,:,:,2]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
Example #8
def class_subnet(inputs):
    with tf.variable_scope("class_subnet", reuse=tf.AUTO_REUSE):
        inputs = relu(conv("conv1", inputs, 256, 3, 1, "SAME"))
        inputs = relu(conv("conv2", inputs, 256, 3, 1, "SAME"))
        inputs = relu(conv("conv3", inputs, 256, 3, 1, "SAME"))
        inputs = relu(conv("conv4", inputs, 256, 3, 1, "SAME"))
        inputs = conv("conv5", inputs, K*A, 3, 1, "SAME", True)
        H, W = tf.shape(inputs)[1], tf.shape(inputs)[2]
        inputs = tf.reshape(inputs, [-1, H * W * A, K])
    return inputs
Example #9
def res_build_block_v1(X,
                       increase=False,
                       init=1.0,
                       stddev=1.0,
                       training=False,
                       projection=None,
                       name=None):
    print('input shape for {} :: {}'.format(name, X.get_shape().as_list()))
    shortcut_projection = X
    stride = 1
    in_channels = X.get_shape().as_list()[-1]
    out_channels = in_channels
    if increase:
        out_channels = 2 * in_channels
        stride = 2
        if projection:
            name_ = name + 'projection_batch_norm'
            shortcut_projection = projection(shortcut_projection,
                                             out_channels=out_channels,
                                             training=training,
                                             name=name_)
        else:
            shortcut_projection = maxpool(shortcut_projection,
                                          filter_size=[1, 2, 2, 1],
                                          stride_size=[1, 2, 2, 1],
                                          padding='VALID')
            pad = (out_channels - in_channels)
            shortcut_projection = tf.pad(shortcut_projection,
                                         [[0, 0], [0, 0], [0, 0], [0, pad]])
    with tf.name_scope('conv1layer'):
        name_ = name + 'conv1layer_batch_norm'
        X = conv(X,
                 filter_size=3,
                 out_channels=out_channels,
                 stride_size=stride,
                 padding='SAME',
                 init_bias=init,
                 stddev=stddev)
        X = batch_norm(X, training, name=name_)
        X = tf.nn.relu(X)
    with tf.name_scope('conv2layer'):
        name_ = name + 'conv2layer_batch_norm'
        X = conv(X,
                 filter_size=3,
                 out_channels=out_channels,
                 stride_size=1,
                 padding='SAME',
                 init_bias=init,
                 stddev=stddev)
        X = batch_norm(X, training, name=name_)
    with tf.name_scope('residual'):
        X += shortcut_projection
        X = tf.nn.relu(X)
    print('output shape for {} :: {}'.format(name, X.get_shape().as_list()))
    return X
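A hedged stacking sketch (conv, batch_norm and maxpool are assumed to exist in the same module as used above; the second block doubles the channels and halves the spatial size via the max-pool + zero-pad shortcut when projection is None):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 32, 32, 16])
training = tf.placeholder(tf.bool)
X = res_build_block_v1(X, increase=False, training=training, name='res1_')
X = res_build_block_v1(X, increase=True, training=training, projection=None, name='res2_')  # 16 -> 32 channels, stride 2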
Example #10
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name):
         inputs = tf.nn.relu(ops.conv("conv0", inputs, 64, 3, 1))
         for d in np.arange(1, config.DEPTH - 1):
             inputs = tf.nn.relu(
                 ops.batchnorm(
                     ops.conv("conv_" + str(d + 1), inputs, 64, 3, 1),
                     train_phase, "bn" + str(d)))
         inputs = ops.conv("conv" + str(config.DEPTH - 1), inputs,
                           config.IMG_C, 3, 1)
         return inputs
Example #11
def inference(inputs, dropout_keep_prob, label_cnt):
    # todo: change lrn parameters
    # conv layer 1
    with tf.name_scope('conv1layer'):
        conv1 = op.conv(
            inputs, 7, 96,
            3)  # (input data, kernel size, #output channels, stride_size)
        #        conv1 = op.lrn(conv1)
        conv1 = tf.nn.max_pool3d(conv1,
                                 ksize=[1, 2, 2, 2, 1],
                                 strides=[1, 1, 1, 1, 1],
                                 padding='VALID')

    # conv layer 2
    with tf.name_scope('conv2layer'):
        conv2 = op.conv(conv1, 5, 256, 1, 1.0)
        #        conv2 = op.lrn(conv2)
        conv2 = tf.nn.max_pool3d(conv2,
                                 ksize=[1, 2, 2, 2, 1],
                                 strides=[1, 1, 1, 1, 1],
                                 padding='VALID')

    # conv layer 3
    with tf.name_scope('conv3layer'):
        conv3 = op.conv(conv2, 3, 384, 1)

    # conv layer 4
    with tf.name_scope('conv4layer'):
        conv4 = op.conv(conv3, 3, 384, 1, 1.0)

    # conv layer 5
    with tf.name_scope('conv5layer'):
        conv5 = op.conv(conv4, 3, 256, 1, 1.0)
        conv5 = tf.nn.max_pool3d(conv5,
                                 ksize=[1, 3, 3, 3, 1],
                                 strides=[1, 2, 2, 2, 1],
                                 padding='VALID')

    # fc layer 1
    with tf.name_scope('fc1layer'):
        fc1 = op.fc(conv5, 4096, 1.0)
        fc1 = tf.nn.dropout(fc1, dropout_keep_prob)

    # fc layer 2
    with tf.name_scope('fc2layer'):
        fc2 = op.fc(fc1, 4096, 1.0)
        fc2 = tf.nn.dropout(fc2, dropout_keep_prob)

    # fc layer 3 - output
    with tf.name_scope('fc3layer'):
        return op.fc(fc2, label_cnt, 1.0, None)
Example #12
def d_net(img, scope, df_dim=64, is_training=True, reuse=False):
    global bn
    bn = functools.partial(bn, is_training=is_training)
    with tf.variable_scope(scope + '_d', reuse=reuse):
        n = lrelu(conv(img, df_dim, kernel_size=4, stride=2, scope='conv1'))
        n = lrelu(bn(conv(n, df_dim * 2, kernel_size=4, stride=2, scope='conv2'), scope='bn1'))
        # (64 x 64 x df_dim*2)
        n = lrelu(bn(conv(n, df_dim * 4, kernel_size=4, stride=2, scope='conv3'), scope='bn2'))
        # (32x 32 x df_dim*4)
        n = lrelu(bn(conv(n, df_dim * 8, kernel_size=4, stride=1, scope='conv4'), scope='bn3'))
        # (32 x 32 x df_dim*8)
        n = conv(n, 1, kernel_size=4, stride=1, scope='conv5') 
        # (32 x 32 x 1)
    return n
Example #13
def test_masked_vert_mask_layer():
    print("===========================================")
    print("Gated Conv Layers: 1 x 3 vertical conv mask (Multiple layers)")
    print("Grayscale [16 in, 16 out]")
    input_shape = [5, 5]
    kernel_shape = [3, 3]
    num_inputs, num_outputs, data_num_channels = 2, 2, 1

    expected = np.array([[0., 0., 0., 0., 0.], [4., 6., 6., 6., 4.],
                         [4., 6., 6., 6., 4.], [4., 6., 6., 6., 4.],
                         [4., 6., 6., 6., 4.]])

    expected = expected_from_list([expected] * num_outputs)

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             'V',
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x3_vert_conv_mask_Grayscale"))
    #print(matrix_to_string(outputs[0,:,:,0]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
    print("-------------------------------------------")
    print("Color [3 in, 48 out]")
    num_inputs, num_outputs, data_num_channels = 3, 6, 3

    expected = np.array([[0., 0., 0., 0., 0.], [6., 9., 9., 9., 6.],
                         [6., 9., 9., 9., 6.], [6., 9., 9., 9., 6.],
                         [6., 9., 9., 9., 6.]])

    expected = expected_from_list([expected] * num_outputs)

    outputs = run(
        conv(create_ones_inputs(input_shape, num_inputs),
             num_outputs,
             kernel_shape,
             'V',
             data_num_channels,
             weights_initializer=tf.ones_initializer,
             scope="1x3_vert_conv_mask_Color"))
    #print(matrix_to_string(outputs[0,:,:,2]))
    # invalid to crop_mask for A or B, only horiz or vertical mask can crop

    assert_equals(outputs, expected)
Example #14
def network(X, dropout_keep_prob=0.8, label_cnt=1000, type_=16):  ####
    out_channels = 64
    layers16 = [2, 2, 3, 3, 3]
    layers19 = [2, 2, 4, 4, 4]
    if type_ == 19:
        layers = layers19
    else:
        layers = layers16
    for x in range(5):
        for y in range(layers[x]):
            layer_name = 'conv{}_{}layer'.format(x + 1, y + 1)
            with tf.name_scope(layer_name):
                X = op.conv(X,
                            filter_size=3,
                            out_channels=out_channels,
                            stride_size=1,
                            padding='SAME',
                            a=tf.nn.relu)
        X = tf.nn.max_pool(X,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
        out_channels = 2 * out_channels

    with tf.name_scope('fc1layer'):
        X = op.fc(X, output_size=4096, a=tf.nn.relu)
    with tf.name_scope('fc2layer'):
        X = op.fc(X, output_size=4096, a=tf.nn.relu)
    with tf.name_scope('fc3layer'):
        X = op.fc(X, output_size=label_cnt, a=None)

    with tf.name_scope('softmaxlayer'):
        out_probs = tf.nn.softmax(logits=X, axis=-1, name='softmax_op')

        return X, out_probs
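A hedged usage sketch for this VGG-style builder (op.conv and op.fc are assumed from the same module; the 224x224 input size is an assumption):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, probs = network(X, dropout_keep_prob=0.8, label_cnt=1000, type_=16)  # type_=19 selects the VGG-19 layer counts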
Example #15
def projection_(shortcut, out_channels, training, name):
    shortcut = conv(shortcut,
                    stride_size=2,
                    filter_size=1,
                    padding='VALID',
                    out_channels=out_channels)
    shortcut = batch_norm(shortcut, training, name=name)
    return shortcut
Example #16
def selfatt(input,
            I,
            input_channel,
            flag_I=True,
            sn=True,
            channel_fac=16,
            stride=1):
    ''' Use spectral normalization after every convolution layer '''
    with tf.variable_scope('attention', reuse=False):
        ch = input.get_shape().as_list()[3]
        if flag_I == True:
            x = tf.concat([input, I], axis=3)
        else:
            x = input

        f = ops.conv(x,
                     ch // channel_fac,
                     kernel=1,
                     stride=1,
                     sn=sn,
                     scope='f_conv')  # [bs, h, w, c']
        g = ops.conv(x,
                     ch // channel_fac,
                     kernel=1,
                     stride=1,
                     sn=sn,
                     scope='g_conv')  # [bs, h, w, c']
        h = ops.conv(x, ch, kernel=1, stride=1, sn=sn,
                     scope='h_conv')  # [bs, h, w, c]

        # N = h * w
        s = tf.matmul(ops.hw_flatten(g), ops.hw_flatten(f),
                      transpose_b=True)  # # [bs, N, N]

        beta = tf.nn.softmax(s)  # attention map

        o = tf.matmul(beta, ops.hw_flatten(h))  # [bs, N, C]
        gamma = tf.get_variable("gamma", [1],
                                initializer=tf.constant_initializer(0.0))
        o = tf.reshape(o, shape=input.shape)  # [bs, h, w, C]
        input = gamma * o + input

    print("Shape of beta...........................................",
          beta.get_shape(), input.get_shape())
    return input  #, beta
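A hedged single-call sketch (ops.conv and ops.hw_flatten are assumed as used above; the shapes must be fully static because of the tf.reshape back to input.shape, and the sizes below are hypothetical):

import tensorflow as tf

feat = tf.placeholder(tf.float32, [4, 32, 32, 64])    # hypothetical feature map
guide = tf.placeholder(tf.float32, [4, 32, 32, 3])    # hypothetical conditioning image I
attended = selfatt(feat, guide, input_channel=64, flag_I=True, sn=True, channel_fac=16)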
Example #17
def encoder_block(inputs, kernel_size, stride, channel_out, name, mode):
    with tf.variable_scope(name):
        convolved = conv(inputs,
                         channel_out,
                         stride=stride,
                         kernel_size=kernel_size)
        normalized = batchnorm(convolved, mode)
        rectified = lrelu(normalized, 0.2)
        return rectified
Example #18
 def build_conv(input, layers):
     print('build convnet')
     for layer_num, layer_config in enumerate(layers):
         (num_filter, kernel_size, stride) = layer_config
         with tf.variable_scope('conv_layer_{}'.format(layer_num)):
             input = ops.conv(input, num_filter, kernel_size, stride,
                              'bn', tf.nn.relu, self.is_training)
             print(input.shape)
     return input
Example #19
    def n_layer_discriminator(self,
                              input,
                              in_channels=3,
                              n_layers=3,
                              ndf=64,
                              norm_type='instance',
                              init_type='normal',
                              init_gain=1.0,
                              is_training=True,
                              sigmoid=None):
        """
            N-layer PatchGAN Discriminator
        """
        # first layer does not use instance normalization
        layer = ops.conv(input, in_channels=in_channels, out_channels=ndf, filter_size=4,
                         stride=2, weight_init_type=init_type, weight_init_gain=init_gain, norm_type=None,
                         activation_type='LeakyReLU', is_training=is_training, scope='layer0', reuse=self.reuse)

        nf_mult = 1
        nf_mult_prev = 1
        for idx in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** idx, 8)

            # perform 4x4 convolutions for n layers with a max of 512 filters
            layer = ops.conv(layer, in_channels=ndf*nf_mult_prev, out_channels=ndf*nf_mult,
                             filter_size=4, stride=2, weight_init_type=init_type, weight_init_gain=init_gain,
                             norm_type=norm_type, activation_type='LeakyReLU', is_training=is_training,
                             scope='layer'+str(idx), reuse=self.reuse)

        # nth layer of 4x4 convolutions uses a stride of 1
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        layer = ops.conv(layer, in_channels=ndf*nf_mult_prev, out_channels=ndf*nf_mult,
                         filter_size=4, stride=1, weight_init_type=init_type, weight_init_gain=init_gain,
                         norm_type=norm_type, activation_type='LeakyReLU', is_training=is_training,
                         scope='layer'+str(n_layers), reuse=self.reuse)

        # produces a single channel prediction map
        layer = ops.conv(layer, in_channels=ndf*nf_mult, out_channels=1, filter_size=4, stride=1,
                         weight_init_type=init_type, weight_init_gain=init_gain, use_bias=True, norm_type=None,
                         activation_type=sigmoid, is_training=is_training, scope='d_out', reuse=self.reuse)

        return layer
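Inside the owning class (self.reuse assumed to be managed by the caller), a 3-layer PatchGAN-style call might look like this sketch:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 256, 256, 3])   # hypothetical input batch
patch_logits = self.n_layer_discriminator(images, in_channels=3, n_layers=3, ndf=64,
                                           norm_type='instance', is_training=True)
# one score per overlapping image patch rather than a single scalar per image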
Example #20
def network(X, training, label_cnt, dropout_keep_prob):
    with tf.name_scope('pre_inception'):
        with tf.name_scope('conv1layer'):
            X = op.conv(X,
                        filter_size=7,
                        stride_size=2,
                        padding='VALID',
                        out_channels=64,
                        a=tf.nn.relu)
            X = tf.pad(X, [[0, 0], [1, 1], [1, 1], [0, 0]])
            X = op.maxpool(X, filter_size=3, stride_size=2, padding='VALID')
            X = op.lrn(X)
        with tf.name_scope('conv2layer'):
            X = op.conv(X,
                        filter_size=3,
                        stride_size=1,
                        padding='SAME',
                        out_channels=192,
                        a=tf.nn.relu)
            X = op.maxpool(X, filter_size=3, stride_size=2, padding='VALID')
            X = op.lrn(X)
    with tf.name_scope('inception_blocks'):
        X = inception3a(X, training)
        X = inception3b(X, training)
        X = inception4a(X, training)
        logits1 = auxillary_logits(X, label_cnt, name='auxillary_layer1')
        X = inception4b(X, training)
        X = inception4c(X, training)
        X = inception4d(X, training)
        logits2 = auxillary_logits(X, label_cnt, name='auxillary_layer2')
        X = inception4e(X, training)
        X = inception5a(X, training)
        X = inception5b(X, training)
    with tf.name_scope('post_inception'):
        X = op.avgpool(X, filter_size=7, stride_size=1, padding='VALID')
        X = tf.nn.dropout(X, dropout_keep_prob)
    with tf.name_scope('fc1layer'):
        final_logits = op.fc(X, output_size=label_cnt, a=None)
    with tf.name_scope('Softmax'):
        out_probs = tf.nn.softmax(logits=final_logits, axis=-1, name='softmax_op')

    return logits1, logits2, final_logits, out_probs
Example #21
def discriminator(inputs,
                  scope="discriminator",
                  is_training=True,
                  reuse=False,
                  shared_scope="shared_discriminator",
                  shared_reuse=False):
    """
        Define Discriminator Network
            inputs: input images
            scope: name of discriminator scope
            is_training: whether the network is built in training mode
            reuse: whether to reuse variables of scope
            shared_scope: name of shared discriminator scope
            shared_reuse: whether to reuse variables of shared_scope
    """

    with tf.variable_scope(scope, reuse=reuse):
        net = inputs
        channel = params.discriminator.channel
        for i in range(params.discriminator.n_discriminator - 1):
            net = ops.conv(
                net,
                scope="conv_{}".format(i + 1),
                dim=channel,
                kernel_size=[3, 3],
                stride=2,
                activation_fn=ops.leaky_relu,
                is_training=is_training,
                weights_initializer=params.discriminator.weights_initializer)
            channel *= 2

        net = ops.conv(
            net,
            scope="conv_6",
            dim=1,
            kernel_size=[1, 1],
            stride=1,
            activation_fn=None,
            is_training=is_training,
            weights_initializer=params.discriminator.weights_initializer)
        # final conv layer; activation_fn is None here (a sigmoid, if used, is applied outside this function)
        return net
Example #22
def convs(num):
    h0 = {}
    h0['data'] = np.array([[1]])
    h0['stride'] = 1
    for i in range(num):
        tmp = conv(h0, stride=1)
        h0 = tmp
        v = Visualizer(20)
        v.visual_pixel(tmp)
        v.save('.\\conv\\' + str(i) + '.jpg')
        print('conv: ', v.size())
Example #23
	def generatorII(self, x, t_aug, is_train, scope = 'generatorII'):
		with tf.variable_scope(scope): 
			depth = CONFIG['g_conv_depth']
			residual = True

			with tf.variable_scope('DownSampling'): 
				maps = ops.conv(x, depth/2, 'map1', k=3, s=2, normalizer=tfly.batch_norm, is_train=is_train, residual=False, activation=tf.nn.relu)
				maps = ops.conv(maps, depth/2, 'map1-0', k=3, s=1, normalizer=tfly.batch_norm, is_train=is_train, residual=residual, activation=tf.nn.relu)
				maps = ops.conv(maps, depth/1, 'map2', k=3, s=2, normalizer=tfly.batch_norm, is_train=is_train, residual=False, activation=tf.nn.relu)
				maps = ops.conv(maps, depth/1, 'map2-0', k=3, s=1, normalizer=tfly.batch_norm, is_train=is_train, residual=residual, activation=tf.nn.relu)

			with tf.variable_scope('MiddleLayer'): 
				size = maps.get_shape().as_list()[1]
				t_aug = tf.expand_dims(t_aug,1)
				t_aug = tf.expand_dims(t_aug,2)
				tiled = tf.tile(t_aug, [1,size,size,1])
				maps = tf.concat([maps, tiled],axis = -1)

				maps = ops.conv(maps, depth/1, 'mapt2', k=3, s=1, normalizer=tfly.batch_norm, is_train=is_train, residual=False, activation=tf.nn.relu)
				maps = ops.conv(maps, depth/1, 'mapt2_0', k=3, s=1, normalizer=tfly.batch_norm, is_train=is_train, residual=residual, activation=tf.nn.relu)

			with tf.variable_scope('UpSampling'): 
				maps = ops.deconv(maps, depth/2, 'map1', k=5, s=3, normalizer=tfly.batch_norm, is_train=is_train, activation=tf.nn.relu)
				maps = ops.deconv(maps, depth/4, 'map2', k=3, s=2, normalizer=tfly.batch_norm, is_train=is_train, activation=tf.nn.relu)
				g_out = ops.deconv(maps, 3, 'map2_0', k=3, s=1, normalizer=tfly.batch_norm, is_train=is_train, activation=tf.tanh)
			g_out = g_out * 0.5 + 0.5 - self.data.im_mean
		return g_out
Example #24
def backbone(inputs, is_training):
    arg_scope = resnet_arg_scope()
    with slim.arg_scope(arg_scope):
        _, end_points = resnet_v2_50(inputs, is_training=is_training)
    C3 = end_points["resnet_v2_50/block2/unit_3/bottleneck_v2"]
    C4 = end_points["resnet_v2_50/block3/unit_5/bottleneck_v2"]
    C5 = end_points["resnet_v2_50/block4/unit_3/bottleneck_v2"]
    P5 = conv("conv5", C5, 256, 1, 1, "SAME")
    P4 = merge("merge1", C4, P5)
    P3 = merge("merge2", C3, P4)
    P6 = conv("conv6", C5, 256, 3, 2, "SAME")
    P7 = conv("conv7", relu(P6), 256, 3, 2, "SAME")

    P3_class_logits = class_subnet(P3)
    P3_box_logits = box_subnet(P3)

    P4_class_logits = class_subnet(P4)
    P4_box_logits = box_subnet(P4)

    P5_class_logits = class_subnet(P5)
    P5_box_logits = box_subnet(P5)

    P6_class_logits = class_subnet(P6)
    P6_box_logits = box_subnet(P6)

    P7_class_logits = class_subnet(P7)
    P7_box_logits = box_subnet(P7)
    class_logits = tf.concat([P3_class_logits, P4_class_logits, P5_class_logits, P6_class_logits, P7_class_logits], axis=1)
    box_logits = tf.concat([P3_box_logits, P4_box_logits, P5_box_logits, P6_box_logits, P7_box_logits], axis=1)
    class_logits_dict = {"P3": P3_class_logits, "P4": P4_class_logits, "P5": P5_class_logits,
                         "P6": P6_class_logits, "P7": P7_class_logits}
    box_logits_dict = {"P3": P3_box_logits, "P4": P4_box_logits, "P5": P5_box_logits,
                       "P6": P6_box_logits, "P7": P7_box_logits}
    return class_logits, box_logits, class_logits_dict, box_logits_dict

# inputs = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, 3])
# is_training = tf.placeholder(tf.bool)
# backbone(inputs, is_training)
Example #25
def discriminator(hr_images, scope, dim):
    """
    Discriminator
    """
    conv_lrelu = partial(conv, activation_fn=lrelu)

    def _combine(x, newdim, name, z=None):
        x = conv_lrelu(x, newdim, 1, 1, name)
        y = x if z is None else tf.concat([x, z], axis=-1)
        return minibatch_stddev_layer(y)

    def _conv_downsample(x, dim, ksize, name):
        y = conv2d_downscale2d(x, dim, ksize, name=name)
        y = lrelu(y)
        return y

    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        with tf.compat.v1.variable_scope("res_4x"):
            net = _combine(hr_images[1], newdim=dim, name="from_input")
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_2x"):
            net = _combine(hr_images[2], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_1x"):
            net = _combine(hr_images[4], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("bn"):
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = _conv_downsample(net, dim, 3, "conv_down1")
            net = minibatch_stddev_layer(net)

            # dense
            dim *= 2
            net = conv_lrelu(net, dim, 1, 1, "dense1")
            net = conv(net, 1, 1, 1, "dense2")
            net = tf.reduce_mean(net, axis=[1, 2])

            return net
Example #26
def network(X, label_cnt, training, dropout_keep_prob):
    with tf.variable_scope('conv1layer'):
        X = op.batch_norm(X, training=training)
        X = tf.nn.relu(X)
        X = op.conv(X, filter_size=[3, 3], out_channels=8, stride_size=1, padding='SAME', a=None)
        X = tf.nn.max_pool(X, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='VALID')
    print('conv1layer', X.get_shape().as_list())

    with tf.variable_scope('conv2layer'):
        X = op.batch_norm(X, training=training)
        X = tf.nn.relu(X)
        X = op.conv(X, filter_size=[3, 3], out_channels=16, stride_size=1, padding='SAME', a=None)
        X = tf.nn.max_pool(X, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='VALID')
    print('conv2layer', X.get_shape().as_list())

    with tf.variable_scope('conv3layer'):
        X = op.batch_norm(X, training=training)
        X = tf.nn.relu(X)
        X = op.conv(X, filter_size=[3, 3], out_channels=32, stride_size=1, padding='SAME')
        X = tf.nn.max_pool(X, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='VALID')
    print('conv3layer', X.get_shape().as_list())

    with tf.variable_scope('conv4layer'):
        X = op.batch_norm(X, training=training)
        X = tf.nn.relu(X)
        X = op.conv(X, filter_size=[3, 3], out_channels=64, stride_size=1, padding='SAME')
    print('conv4layer', X.get_shape().as_list())

    with tf.variable_scope('fc1layer'):
        X = op.fc(X, output_size=256, a=tf.nn.relu)

    with tf.variable_scope('fc2layer'):
        X = op.fc(X, output_size=label_cnt, a=None)

    with tf.variable_scope('softmaxlayer'):
        out_probs = tf.nn.softmax(logits=X, axis=-1, name='softmax_op')

    return X, out_probs
Example #27
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
         # inputs = tf.random_crop(inputs, [-1, 70, 70, 3])
         inputs = conv("conv1_1", inputs, 64, 3, 2)
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv1_2", inputs, 64, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv2_1", inputs, 128, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN1")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv2_2", inputs, 128, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv3_1", inputs, 256, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN2")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv3_2", inputs, 256, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv4_1", inputs, 512, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN3")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = fully_connected("fc", inputs, 512, is_SN=True)
         output = fully_connected("output", inputs, 1)
     return output
Example #28
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name):
         inputs = conv("conv1", inputs, 64, 9)
         inputs = prelu("alpha1", inputs)
         skip_connection = tf.identity(inputs)
         #The paper has 16 residual blocks
         for b in range(1, self.B + 1):
             inputs = B_residual_blocks("B"+str(b), inputs, train_phase)
         # inputs = B_residual_blocks("B2", inputs, train_phase)
         # inputs = B_residual_blocks("B3", inputs, train_phase)
         # inputs = B_residual_blocks("B4", inputs, train_phase)
         # inputs = B_residual_blocks("B5", inputs, train_phase)
         inputs = conv("conv2", inputs, 64, 3)
         inputs = batchnorm(inputs, train_phase, "BN")
         inputs = inputs + skip_connection
         inputs = conv("conv3", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)
         inputs = prelu("alpha2", inputs)
         inputs = conv("conv4", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)
         inputs = prelu("alpha3", inputs)
         inputs = conv("conv5", inputs, 3, 9)
     return tf.nn.tanh(inputs)
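Assuming gen is an instance of this generator class (with self.B and self.name set in its constructor, and the conv/prelu/batchnorm/pixelshuffler/B_residual_blocks helpers available), a call might look like this sketch:

import tensorflow as tf

lr_images = tf.placeholder(tf.float32, [None, 24, 24, 3])   # hypothetical low-resolution patch
train_phase = tf.placeholder(tf.bool)
sr_images = gen(lr_images, train_phase)   # two pixel-shuffle stages -> 4x upscaling, tanh output in [-1, 1]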
Example #29
def ResNet(image, is_train, num_classes=10, reuse=False, name='ResNet-cifar'):
    channel_list = [16, 32, 64]
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
        Res_Block_0 = tf.nn.relu(batch_norm_layer(conv(image, 16, 3, 'Block_0/res_conv_0', s=1, use_bias=False), is_train))
        Res_Block_1 = residual_block(Res_Block_0, channel_list[0], False, is_train, 'Block_1')
        Res_Block_2 = residual_block(Res_Block_1, channel_list[1], True, is_train, 'Block_2')
        Res_Block_3 = residual_block(Res_Block_2, channel_list[2], True, is_train, 'Block_3')
        ave_vec = tf.reshape(tf.nn.avg_pool(Res_Block_3, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID'), [-1, channel_list[2]])
        logits = tf.layers.dense(ave_vec, num_classes, kernel_initializer=tf.truncated_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='logits')
        embed = logits
    return logits, embed
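A minimal graph-construction sketch (conv, batch_norm_layer and residual_block are assumed from the same module; CIFAR-sized input, TF 1.x):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
labels = tf.placeholder(tf.int64, [None])
is_train = tf.placeholder(tf.bool)
logits, embed = ResNet(images, is_train, num_classes=10, reuse=False)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))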
Example #30
def DenseNet(inputs, nums_out, growth_rate, train_phase, depth):
    inputs = preprocess(inputs)
    n = (depth - 4) // 3
    inputs = conv("conv1", inputs, nums_out=16, k_size=3)
    inputs = DenseBlock("DenseBlock1", inputs, n, growth_rate, train_phase)
    inputs = Transition("Transition_Layer1",
                        inputs,
                        nums_out=growth_rate,
                        train_phase=train_phase)
    inputs = DenseBlock("DenseBlock2", inputs, n, growth_rate, train_phase)
    inputs = Transition("Transition_Layer2",
                        inputs,
                        nums_out=growth_rate,
                        train_phase=train_phase)
    inputs = DenseBlock("DenseBlock3", inputs, n, growth_rate, train_phase)
    inputs = batchnorm(inputs, train_phase, "BN")
    inputs = relu(inputs)
    inputs = global_avg_pooling(inputs)
    inputs = fully_connected("FC", inputs, nums_out)
    return inputs
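A hedged usage sketch (preprocess, conv, DenseBlock, Transition, batchnorm, relu, global_avg_pooling and fully_connected are assumed from the same module; the input size and depth are assumptions):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
train_phase = tf.placeholder(tf.bool)
# depth=40 gives n = (40 - 4) // 3 = 12 conv layers per dense block
logits = DenseNet(x, nums_out=10, growth_rate=12, train_phase=train_phase, depth=40)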