Example #1
 def last_cnn(self, h, resolution, ch_in, ch_out):
     with nn.parameter_scope("phase_{}".format(resolution)):
         with nn.parameter_scope("conv1"):
             h = conv(h,
                      ch_in,
                      kernel=(3, 3),
                      pad=(1, 1),
                      stride=(1, 1),
                      with_bias=not self.use_ln,
                      use_wscale=self.use_wscale,
                      use_he_backward=self.use_he_backward)
             h = LN(h, use_ln=self.use_ln)
             h = self.activation(h)
         with nn.parameter_scope("conv2"):
             h = conv(h,
                      ch_out,
                      kernel=(4, 4),
                      pad=(0, 0),
                      stride=(1, 1),
                      with_bias=not self.use_ln,
                      use_wscale=self.use_wscale,
                      use_he_backward=self.use_he_backward)
             h = LN(h, use_ln=self.use_ln)
             h = self.activation(h)
         with nn.parameter_scope("linear"):
             h = affine(h,
                        1,
                        with_bias=True,
                        use_wscale=self.use_wscale,
                        use_he_backward=self.use_he_backward)
     return h
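The `conv` helper these nnabla snippets call is not part of the listing. Below is a minimal sketch of what an equalized-learning-rate convolution wrapper with this signature could look like, assuming PGGAN-style runtime weight scaling is what `use_wscale` and `use_he_backward` control; the body is illustrative, not the repository's actual implementation.

import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.initializer import NormalInitializer, ConstantInitializer

def conv(h, ch_out, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
         with_bias=True, use_wscale=True, use_he_backward=False):
    # He std; "backward" mode uses fan-out instead of fan-in (assumption).
    fan = (ch_out if use_he_backward else h.shape[1]) * np.prod(kernel)
    std = np.sqrt(2.0 / fan)
    w_init = NormalInitializer(1.0) if use_wscale else NormalInitializer(std)
    w = nn.parameter.get_parameter_or_create(
        "W", (ch_out, h.shape[1]) + tuple(kernel), w_init)
    if use_wscale:
        w = w * std  # runtime weight scaling (equalized learning rate)
    b = None
    if with_bias:
        b = nn.parameter.get_parameter_or_create(
            "b", (ch_out,), ConstantInitializer(0.0))
    return F.convolution(h, w, b, pad=pad, stride=stride)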
Example #2
    def _inference(self, CC, MLO, keep_prob, is_train):
        layers = [3, 16, 32, 64, 64]

        cc = CC
        mlo = MLO

        for i in range(4):
            with tf.variable_scope('CC_layers_%s' % i) as scope:
                cc = F.conv(cc, layers[i])
                cc = F.batch_norm(cc, is_train)
                cc = F.activation(cc)
            cc = F.max_pool(cc)
        with tf.variable_scope('CC_features') as scope:
            cc = F.dense(cc, layers[i + 1])
            cc = F.batch_norm(cc, is_train)
            cc = F.activation(cc)

        for j in range(4):
            with tf.variable_scope('MLO_layers_%s' % j) as scope:
                mlo = F.conv(mlo, layers[j])
                mlo = F.batch_norm(mlo, is_train)
                mlo = F.activation(mlo)
            mlo = F.max_pool(mlo)
        with tf.variable_scope('MLO_features') as scope:
            mlo = F.dense(mlo, layers[j + 1])
            mlo = F.batch_norm(mlo, is_train)
            mlo = F.activation(mlo)

        with tf.variable_scope('softmax') as scope:
            concat = tf.concat([cc, mlo], 1)  # tf.concat(values, axis) for TF >= 1.0
            h = F.dense(concat, self._num_classes)

        return h
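The `F` module used in these TensorFlow examples is likewise not shown. A rough sketch of thin wrappers with matching call signatures, assuming TensorFlow 1.x and its `tf.layers` API; the bodies are guesses at typical choices (3x3 kernels, 2x2 pooling, ReLU), not the original module.

import tensorflow as tf

def conv(h, ch_out, strides=1):
    # 3x3 convolution, 'same' padding.
    return tf.layers.conv2d(h, ch_out, kernel_size=3, strides=strides, padding='same')

def batch_norm(h, is_train):
    return tf.layers.batch_normalization(h, training=is_train)

def activation(h):
    return tf.nn.relu(h)

def max_pool(h):
    return tf.layers.max_pooling2d(h, pool_size=2, strides=2)

def dense(h, units):
    # Flatten any remaining spatial dimensions before the fully connected layer.
    if h.get_shape().ndims > 2:
        h = tf.layers.flatten(h)
    return tf.layers.dense(h, units)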
Example #3
    def cnn(self, h, resolution, channel, test):
        """CNN block

        The following operations are performed twice.

        1. Upsampling
        2. Conv
        3. Pixel-wise normalization
        4. Relu
        """
        h = F.unpooling(h, kernel=(2, 2))
        with nn.parameter_scope("phase_{}".format(resolution)):
            with nn.parameter_scope("conv1"):
                h = conv(h, channel, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                         with_bias=not self.use_bn,
                         use_wscale=self.use_wscale,
                         use_he_backward=self.use_he_backward)
                h = pixel_wise_feature_vector_normalization(
                    BN(h, use_bn=self.use_bn, test=test))
                h = self.activation(h)
            with nn.parameter_scope("conv2"):
                h = conv(h, channel, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                         with_bias=not self.use_bn,
                         use_wscale=self.use_wscale,
                         use_he_backward=self.use_he_backward)
                h = pixel_wise_feature_vector_normalization(
                    BN(h, use_bn=self.use_bn, test=test))
                h = self.activation(h)
        return h
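`pixel_wise_feature_vector_normalization` is the PGGAN pixel norm: every spatial position's feature vector is divided by the root mean square of its channel values. A minimal nnabla sketch, assuming NCHW layout as in the snippets (illustrative; the repository's helper may differ in details such as the epsilon):

import nnabla.functions as F

def pixel_wise_feature_vector_normalization(h, eps=1e-8):
    # Root mean square over the channel axis, one value per (batch, y, x).
    mean = F.mean(F.pow_scalar(h, 2.0), axis=1, keepdims=True)
    rms = F.pow_scalar(mean + eps, 0.5)
    return h / F.broadcast(rms, h.shape)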
Example #4
    def cnn(self, h, resolution, ch_in, ch_out):
        """CNN block

        The following operations are performed twice.

        1. Conv
        2. Layer normalization
        3. Leaky relu
        """
        with nn.parameter_scope("phase_{}".format(resolution)):
            with nn.parameter_scope("conv1"):
                h = conv(h,
                         ch_in,
                         kernel=(3, 3),
                         pad=(1, 1),
                         stride=(1, 1),
                         with_bias=not self.use_ln,
                         use_wscale=self.use_wscale,
                         use_he_backward=self.use_he_backward)
                h = LN(h, use_ln=self.use_ln)
                h = self.activation(h)
            with nn.parameter_scope("conv2"):
                h = conv(h,
                         ch_out,
                         kernel=(3, 3),
                         pad=(1, 1),
                         stride=(1, 1),
                         with_bias=not self.use_ln,
                         use_wscale=self.use_wscale,
                         use_he_backward=self.use_he_backward)
                h = LN(h, use_ln=self.use_ln)
                h = self.activation(h)
        h = F.average_pooling(h, kernel=(2, 2))
        return h
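`LN` in Examples #1 and #4 looks like a layer-normalization toggle that is a no-op when `use_ln` is False (in which case the convolutions keep their bias). A plain-nnabla sketch of that behaviour, normalizing over all non-batch axes; this is an assumption, and the original helper may add learned scale and bias terms.

import nnabla.functions as F

def LN(h, use_ln=True, eps=1e-8):
    if not use_ln:
        return h
    axes = list(range(1, len(h.shape)))  # every axis except the batch axis
    mu = F.broadcast(F.mean(h, axis=axes, keepdims=True), h.shape)
    var = F.mean(F.pow_scalar(h - mu, 2.0), axis=axes, keepdims=True)
    return (h - mu) / F.broadcast(F.pow_scalar(var + eps, 0.5), h.shape)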
Example #5
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return tf.nn.softmax(h)  # returns class probabilities; Example #6 below returns the raw logits
Example #6
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return h
Example #7
 def _residual(self, h, channels, strides, keep_prob, is_train):
     h0 = h
     h1 = F.conv(F.activation(F.batch_norm(self, 'bn1', h0, is_train)), channels, strides)
     h1 = F.dropout(h1, keep_prob, is_train)
     h2 = F.conv(F.activation(F.batch_norm(self, 'bn2', h1, is_train)), channels)
     if F.volume(h0) == F.volume(h2):
         h = h0 + h2
     else :
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
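Examples #7 through #11 decide between an identity shortcut and a projection or padding shortcut by comparing `F.volume` of the block's input and output. A plausible reading is that `volume` returns the number of elements in one example's feature map; a short sketch under that assumption, for a statically shaped TensorFlow tensor:

import numpy as np

def volume(h):
    # Product of all dimensions except the batch axis.
    return int(np.prod(h.get_shape().as_list()[1:]))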
Example #8
 def _residual(self, h, channels, strides, keep_prob):
     h0 = h
     h1 = F.dropout(
         F.conv(F.activation(F.batch_normalization(h0)), channels, strides),
         keep_prob)
     h2 = F.conv(F.activation(F.batch_normalization(h1)), channels)
     # c.f. http://gitxiv.com/comments/7rffyqcPLirEEsmpX
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
Example #9
 def _residual(self, h, channels, strides, keep_prob, is_train):
     h0 = h
     with tf.variable_scope('residual_first'):
         h1 = F.conv(F.activation(F.batch_norm(h0, is_train)), channels, strides)
         h1 = F.dropout(h1, keep_prob, is_train)
     with tf.variable_scope('residual_second'):
         h2 = F.conv(F.activation(F.batch_norm(h1, is_train)), channels)
     if F.volume(h0) == F.volume(h2):
         h = h0 + h2
     else :
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
Example #10
 def _residual(self, h, channels, strides):
     h0 = h
     h1 = F.activation(
         F.batch_normalization(
             F.conv(h0, channels, strides, bias_term=False)))
     h2 = F.batch_normalization(F.conv(h1, channels, bias_term=False))
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h3 = F.avg_pool(h0)
         h4 = tf.pad(h3,
                     [[0, 0], [0, 0], [0, 0], [channels // 4, channels // 4]])  # zero-pad the channel axis so shapes match; // keeps integer paddings in Python 3
         h = h2 + h4
     return h
Example #11
 def _residual(self, h, channels, strides):
     h0 = h
     h1 = F.activation(
         F.batch_normalization(
             F.conv(h0, channels, strides, bias_term=False)))
     h2 = F.batch_normalization(F.conv(h1, channels, bias_term=False))
     # c.f. http://gitxiv.com/comments/7rffyqcPLirEEsmpX
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h3 = F.avg_pool(h0)
         h4 = tf.pad(h3,
                     [[0, 0], [0, 0], [0, 0], [channels // 4, channels // 4]])  # zero-pad the channel axis so shapes match; // keeps integer paddings in Python 3
         h = h2 + h4
     return F.activation(h)
Example #12
 def first_cnn(self, h, resolution, channel, test=False):
     with nn.parameter_scope("phase_{}".format(resolution)):
         # affine is 1x1 conv with 4x4 kernel and 3x3 pad.
         with nn.parameter_scope("conv1"):
             h = affine(h,
                        channel * 4 * 4,
                        with_bias=not self.use_bn,
                        use_wscale=self.use_wscale,
                        use_he_backward=self.use_he_backward)
             h = BN(h, use_bn=self.use_bn, test=test)
             h = F.reshape(h, (h.shape[0], channel, 4, 4))
             h = pixel_wise_feature_vector_normalization(
                 BN(h, use_bn=self.use_bn, test=test))
             h = self.activation(h)
         with nn.parameter_scope("conv2"):
             h = conv(h,
                      channel,
                      kernel=(3, 3),
                      pad=(1, 1),
                      stride=(1, 1),
                      with_bias=not self.use_bn,
                      use_wscale=self.use_wscale,
                      use_he_backward=self.use_he_backward)
             h = pixel_wise_feature_vector_normalization(
                 BN(h, use_bn=self.use_bn, test=test))
             h = self.activation(h)
     return h
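The `affine` helper (used in Examples #1 and #12) presumably follows the same equalized-learning-rate pattern as `conv`, but for a fully connected layer. A minimal sketch under the same assumptions as the `conv` sketch after Example #1:

import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.initializer import NormalInitializer, ConstantInitializer

def affine(h, n_out, with_bias=True, use_wscale=True, use_he_backward=False):
    n_in = int(np.prod(h.shape[1:]))
    fan = n_out if use_he_backward else n_in
    std = np.sqrt(2.0 / fan)
    w_init = NormalInitializer(1.0) if use_wscale else NormalInitializer(std)
    w = nn.parameter.get_parameter_or_create("W", (n_in, n_out), w_init)
    if use_wscale:
        w = w * std  # runtime weight scaling
    b = nn.parameter.get_parameter_or_create(
        "b", (n_out,), ConstantInitializer(0.0)) if with_bias else None
    return F.affine(h, w, b)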
Example #13
    def _inference(self, X, keep_prob, is_train):
        dropout_rate = [0.9, 0.8, 0.7, 0.6, 0.5]
        layers = [64, 128, 256, 512, 512]
        iters = [2, 2, 3, 3]
        h = X

        # VGG Network Layer
        for i in range(4):
            for j in range(iters[i]):
                with tf.variable_scope('layers%s_%s' % (i, j)) as scope:
                    h = F.conv(h, layers[i])
                    h = F.batch_norm(h, is_train)
                    h = F.activation(h)
                    h = F.dropout(h, dropout_rate[i], is_train)
            h = F.max_pool(h)

        # Fully Connected Layer
        with tf.variable_scope('fully_connected_layer') as scope:
            h = F.dense(h, layers[i + 1])
            h = F.batch_norm(h, is_train)
            h = F.activation(h)
            h = F.dropout(h, dropout_rate[i + 1], is_train)

        # Softmax Layer
        with tf.variable_scope('softmax_layer') as scope:
            h = F.dense(h, self._num_classes)

        return h
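The `F.dropout(h, p, is_train)` calls in this example pass what look like keep probabilities (0.9 down to 0.5) together with a training flag, matching the `keep_prob` argument seen in the residual examples. A hedged TensorFlow 1.x sketch of such a wrapper, assuming `is_train` is (convertible to) a boolean tensor:

import tensorflow as tf

def dropout(h, keep_prob, is_train):
    # Drop units only while training; pass activations through unchanged otherwise.
    return tf.cond(tf.cast(is_train, tf.bool),
                   lambda: tf.nn.dropout(h, keep_prob=keep_prob),
                   lambda: h)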
Example #14
    def to_RGB(self, h, resolution):
        """To RGB layer

        To RGB projects feature maps to RGB maps.
        """
        with nn.parameter_scope("to_rgb_{}".format(resolution)):
            h = conv(h, 3, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                     with_bias=True,
                     use_wscale=self.use_wscale,
                     use_he_backward=self.use_he_backward)
        return h
Example #15
    def from_RGB(self, h, resolution, channel):
        """From RGB layer

        From RGB projects RGB maps to feature maps.
        """
        with nn.parameter_scope("from_rgb_{}".format(resolution)):
            h = conv(h, channel, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                     with_bias=True,
                     use_wscale=self.use_wscale,
                     use_he_backward=self.use_he_backward)
            h = self.activation(h)
        return h
Example #16
 def _inference(self, X, keep_prob):
     h = X
     h = F.activation(F.batch_normalization(F.conv(h, 16, bias_term=False)))
     for i in range(self._layers):
         h = self._residual(h, channels=16, strides=1)
     for channels in [32, 64]:
         for i in range(self._layers):
             strides = 2 if i == 0 else 1
             h = self._residual(h, channels, strides)
     h = tf.reduce_mean(h, reduction_indices=[1,
                                              2])  # Global Average Pooling
     h = F.dense(h, self._num_classes)
     return h
Example #17
    def _inference(self, X, keep_prob, is_train):
        h = F.conv(X, 16)
        for i in range(self._layers):
            with tf.variable_scope(str(16*self._k)+'_layers_%s' %i):
                h = self._residual(h, channels=16*self._k, strides=1, keep_prob=keep_prob, is_train=is_train)
        for channels in [32*self._k, 64*self._k]:
            for i in range(self._layers):
                with tf.variable_scope(str(channels)+'_layers_%s' %i):
                    strides = 2 if i == 0 else 1
                    h = self._residual(h, channels, strides, keep_prob, is_train)
        h = F.activation(F.batch_norm(self, 'bn', h, is_train))
        h = tf.reduce_mean(h, reduction_indices=[1,2])
        h = F.dense(h, self._num_classes)

        return h
Example #18
    def _inference(self, X, keep_prob, is_train):
        # Conv_layer 1
        conv = F.conv(X, 192)
        batch_norm = F._batch_norm(self, 'bn1', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn2', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        max_pool = F.max_pool(dropout) # 16 x 16

        # Conv_layer 2
        conv = F.conv(max_pool, 192)
        batch_norm = F._batch_norm(self, 'bn3', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn4', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        max_pool = F.max_pool(dropout) # 8 x 8

        # Conv_layer 3
        conv = F.conv(max_pool, 256)
        batch_norm = F._batch_norm(self, 'bn5', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn6', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn7', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        max_pool = F.max_pool(dropout) # 4 x 4

        # Conv_layer 4
        conv = F.conv(max_pool, 512)
        batch_norm = F._batch_norm(self, 'bn8', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn9', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)  # feed the previous block's output rather than max_pool
        batch_norm = F._batch_norm(self, 'bn10', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        max_pool = F.max_pool(dropout) # 2 x 2

        # Fully Connected Layer
        h = tf.reduce_mean(max_pool, reduction_indices=[1,2])
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, 512)
        h = F._batch_norm(self, 'bn11', h, is_train)
        h = F.activation(h)
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, self._num_classes)

        return h