Example No. 1
    def _inference(self, X, keep_prob, is_train):
        # Per-stage dropout parameters and channel widths; the keep_prob
        # argument is unused here in favor of the per-stage dropout_rate list.
        dropout_rate = [0.9, 0.8, 0.7, 0.6, 0.5]
        layers = [64, 128, 256, 512, 512]
        iters = [2, 2, 3, 3]
        h = X

        # VGG Network Layer
        for i in range(4):
            for j in range(iters[i]):
                with tf.variable_scope('layers%s_%s' % (i, j)) as scope:
                    h = F.conv(h, layers[i])
                    h = F.batch_norm(h, is_train)
                    h = F.activation(h)
                    h = F.dropout(h, dropout_rate[i], is_train)
            h = F.max_pool(h)

        # Fully Connected Layer (after the loop above i == 3, so layers[i + 1]
        # and dropout_rate[i + 1] select the final entries of each list)
        with tf.variable_scope('fully_connected_layer') as scope:
            h = F.dense(h, layers[i + 1])
            h = F.batch_norm(h, is_train)
            h = F.activation(h)
            h = F.dropout(h, dropout_rate[i + 1], is_train)

        # Softmax Layer
        with tf.variable_scope('softmax_layer') as scope:
            h = F.dense(h, self._num_classes)

        return h
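All five examples call into a helper module F that is not shown here. Below is a minimal sketch of what those helpers might look like in TensorFlow 1.x; the kernel sizes, padding, pooling window, and the keep-probability convention for dropout are all assumptions, not the original module:

# Hypothetical sketch of the helper module F assumed by these examples (TF 1.x).
import tensorflow as tf

def conv(x, channels, strides=1):
    # Assumed 3x3 same-padded convolution.
    return tf.layers.conv2d(x, channels, kernel_size=3,
                            strides=strides, padding='same')

def batch_norm(x, is_train):
    return tf.layers.batch_normalization(x, training=is_train)

def activation(x):
    return tf.nn.relu(x)

def dropout(x, keep_prob, is_train):
    # tf.layers.dropout takes a drop rate, so convert from a keep probability.
    return tf.layers.dropout(x, rate=1.0 - keep_prob, training=is_train)

def max_pool(x):
    return tf.layers.max_pooling2d(x, pool_size=2, strides=2)

def dense(x, units):
    # Flatten any remaining spatial dimensions before the dense layer.
    if x.get_shape().ndims > 2:
        x = tf.layers.flatten(x)
    return tf.layers.dense(x, units)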
Example No. 2
    def _inference(self, CC, MLO, keep_prob, is_train):
        layers = [3, 16, 32, 64, 64]

        cc = CC
        mlo = MLO

        for i in range(4):
            with tf.variable_scope('CC_layers_%s' % i) as scope:
                cc = F.conv(cc, layers[i])
                cc = F.batch_norm(cc, is_train)
                cc = F.activation(cc)
            cc = F.max_pool(cc)
        with tf.variable_scope('CC_features') as scope:
            cc = F.dense(cc, layers[i + 1])
            cc = F.batch_norm(cc, is_train)
            cc = F.activation(cc)

        for j in range(4):
            with tf.variable_scope('MLO_layers_%s' % j) as scope:
                mlo = F.conv(mlo, layers[j])
                mlo = F.batch_norm(mlo, is_train)
                mlo = F.activation(mlo)
            mlo = F.max_pool(mlo)
        with tf.variable_scope('MLO_features') as scope:
            mlo = F.dense(mlo, layers[j + 1])
            mlo = F.batch_norm(mlo, is_train)
            mlo = F.activation(mlo)

        with tf.variable_scope('softmax') as scope:
            # TF >= 1.0 argument order; older releases used tf.concat(1, [cc, mlo])
            concat = tf.concat([cc, mlo], axis=1)
            h = F.dense(concat, self._num_classes)

        return h
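This two-stream variant runs the CC and MLO mammography views (craniocaudal and mediolateral-oblique) through separate convolutional branches and concatenates the two feature vectors before the classifier. A hypothetical wiring sketch; the input shapes and the Model class are assumptions, not from the original code:

import tensorflow as tf

CC = tf.placeholder(tf.float32, [None, 224, 224, 1], name='CC')
MLO = tf.placeholder(tf.float32, [None, 224, 224, 1], name='MLO')
is_train = tf.placeholder(tf.bool, name='is_train')

model = Model(num_classes=2)  # assumed constructor
logits = model._inference(CC, MLO, keep_prob=0.5, is_train=is_train)
probs = tf.nn.softmax(logits)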
Example No. 3
    def _residual(self, h, channels, strides, keep_prob, is_train):
        h0 = h
        # Pre-activation residual branch: BN -> activation -> conv, with
        # dropout between the two convolutions
        h1 = F.conv(F.activation(F.batch_norm(self, 'bn1', h0, is_train)), channels, strides)
        h1 = F.dropout(h1, keep_prob, is_train)
        h2 = F.conv(F.activation(F.batch_norm(self, 'bn2', h1, is_train)), channels)
        if F.volume(h0) == F.volume(h2):
            # Shapes match: identity shortcut
            h = h0 + h2
        else:
            # Shapes differ: project the shortcut with a strided convolution
            h4 = F.conv(h0, channels, strides)
            h = h2 + h4
        return h
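Unlike the two-argument F.batch_norm calls in Examples 1 and 2, this example (and Example 5) passes the model object and a scope name, which suggests a variant that creates its variables under an explicit variable scope. A sketch of that variant, purely as an assumption about the unshown module:

import tensorflow as tf

def batch_norm(model, name, x, is_train):
    # The model argument is unused in this sketch; the original may use it
    # to register update ops or track the variables it creates.
    with tf.variable_scope(name):
        return tf.layers.batch_normalization(x, training=is_train)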
Example No. 4
    def _residual(self, h, channels, strides, keep_prob, is_train):
        h0 = h
        with tf.variable_scope('residual_first'):
            h1 = F.conv(F.activation(F.batch_norm(h0, is_train)), channels, strides)
            h1 = F.dropout(h1, keep_prob, is_train)
        with tf.variable_scope('residual_second'):
            h2 = F.conv(F.activation(F.batch_norm(h1, is_train)), channels)
        if F.volume(h0) == F.volume(h2):
            h = h0 + h2  # identity shortcut when shapes match
        else:
            h4 = F.conv(h0, channels, strides)  # projection shortcut
            h = h2 + h4
        return h
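Both residual variants compare F.volume(h0) with F.volume(h2) to decide between an identity and a projection shortcut. A plausible helper simply measures the per-example tensor size; this is an assumption, not the original implementation:

import numpy as np

def volume(x):
    # Per-example element count (height * width * channels). Comparing the
    # full shape list would be stricter, since distinct shapes can share a product.
    return np.prod(x.get_shape().as_list()[1:])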
Example No. 5
    def _inference(self, X, keep_prob, is_train):
        h = F.conv(X, 16)
        # First stage: 16 * k channels, stride 1 throughout
        for i in range(self._layers):
            with tf.variable_scope('%d_layers_%d' % (16 * self._k, i)):
                h = self._residual(h, channels=16 * self._k, strides=1,
                                   keep_prob=keep_prob, is_train=is_train)
        # Later stages: downsample with stride 2 in the first block of each stage
        for channels in [32 * self._k, 64 * self._k]:
            for i in range(self._layers):
                with tf.variable_scope('%d_layers_%d' % (channels, i)):
                    strides = 2 if i == 0 else 1
                    h = self._residual(h, channels, strides, keep_prob, is_train)
        h = F.activation(F.batch_norm(self, 'bn', h, is_train))
        # Global average pooling over the spatial dimensions, then the classifier
        h = tf.reduce_mean(h, axis=[1, 2])
        h = F.dense(h, self._num_classes)

        return h
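With three stages of self._layers pre-activation blocks at widths 16k, 32k, and 64k, this matches the wide residual network (WRN) layout, where the total depth is 6n + 4 for n blocks per stage. A hypothetical wiring sketch; the class name, constructor, and CIFAR-sized input are assumptions:

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_train = tf.placeholder(tf.bool)
model = WideResNet(layers=4, k=10, num_classes=10)  # assumed class; WRN-28-10
logits = model._inference(X, keep_prob=0.7, is_train=is_train)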