Example #1
    def _create_conv(self):
        # Five 3-D conv blocks (C3D-style): conv + ReLU, then max-pool.
        self._create_input()
        arg_scope = tf.contrib.framework.arg_scope
        # arg_scope injects the shared keyword arguments into every conv3d call below.
        with arg_scope([conv3d], nl=tf.nn.relu,
                       trainable=True, mode=self.mode, graph=self.graph):
            conv1 = conv3d(self.image, 3, 64, 'conv1', 'wc1', 'bc1')
            pool1 = max_pool3d(conv1, 'pool1', padding='SAME', filter_size=[1,2,2])

            conv2 = conv3d(pool1, 3, 128, 'conv2', 'wc2', 'bc2')
            pool2 = max_pool3d(conv2, 'pool2', padding='SAME')

            conv3a = conv3d(pool2, 3, 256, 'conv3a', 'wc3a', 'bc3a')
            conv3b = conv3d(conv3a, 3, 256, 'conv3b', 'wc3b', 'bc3b')
            pool3 = max_pool3d(conv3b, 'pool3', padding='SAME')

            conv4a = conv3d(pool3, 3, 256, 'conv4a', 'wc4a', 'bc4a')
            conv4b = conv3d(conv4a, 3, 256, 'conv4b', 'wc4b', 'bc4b')
            pool4 = max_pool3d(conv4b, 'pool4', padding='SAME')

            conv5a = conv3d(pool4, 3, 256, 'conv5a', 'wc5a', 'bc5a')
            conv5b = conv3d(conv5a, 3, 256, 'conv5b', 'wc5b', 'bc5b')
            pool5 = max_pool3d(conv5b, 'pool5', padding='SAME')

            self.layer['conv1'] = conv1
            self.layer['conv2'] = conv2
            self.layer['conv3'] = conv3b
            self.layer['conv4'] = conv4b
            self.layer['pool5'] = pool5
            self.layer['conv_out'] = self.layer['conv5'] = conv5b

        return pool5
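
Note that conv3d and max_pool3d above are wrappers from the surrounding codebase, not TensorFlow ops. Below is a minimal sketch of what such wrappers might look like in raw TF 1.x, assuming the signatures implied by the calls above (the wrappers in the source repo may differ); the add_arg_scope decorator is what lets arg_scope inject the shared keyword arguments:

import tensorflow as tf

@tf.contrib.framework.add_arg_scope
def conv3d(x, filter_size, out_dim, name, wname, bname,
           nl=tf.nn.relu, trainable=True, **kwargs):
    # x: [batch, depth, height, width, in_channels]; kwargs absorbs the
    # repo-specific mode/graph arguments passed through the arg_scope.
    in_dim = x.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        w = tf.get_variable(wname, [filter_size] * 3 + [in_dim, out_dim],
                            trainable=trainable)
        b = tf.get_variable(bname, [out_dim], trainable=trainable,
                            initializer=tf.zeros_initializer())
        out = tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding='SAME')
        return nl(tf.nn.bias_add(out, b))

def max_pool3d(x, name, padding='SAME', filter_size=(2, 2, 2)):
    # Pool with stride equal to the window size.
    ksize = [1] + list(filter_size) + [1]
    return tf.nn.max_pool3d(x, ksize=ksize, strides=ksize,
                            padding=padding, name=name)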
Example #2
    def inference(self):
        print('input_shape:', self.X.shape.as_list())
        conv1 = conv3d('conv1',
                       x=self.X,
                       w=None,
                       num_filters=self.output_channels['conv1'],
                       kernel_size=(3, 3, 3),
                       stride=(1, 2, 2),
                       l2_strength=L2_DECAY,
                       bias=0.0,
                       batchnorm_enabled=True,
                       is_training=self.is_training,
                       activation=tf.nn.relu,
                       padding='SAME')
        print('conv1_shape:', conv1.shape.as_list())
        max_pool = max_pool_3d(conv1,
                               size=(3, 3, 3),
                               stride=(2, 2, 2),
                               name='max_pool')
        print('max3d_shape:', max_pool.shape.as_list())
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        print('stage2_shape:', stage2.shape.as_list())
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        print('stage3_shape:', stage3.shape.as_list())
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        print('stage4_shape:', stage4.shape.as_list())
        global_pool = avg_pool_3d(stage4,
                                  size=(1, 5, 5),
                                  stride=(1, 1, 1),
                                  name='global_pool',
                                  padding='VALID')
        print('avg3d_shape:', global_pool.shape.as_list())

        drop_out = dropout(global_pool,
                           is_training=self.is_training,
                           keep_prob=0.5)
        logits_unflattened = conv3d('fc',
                                    drop_out,
                                    w=None,
                                    num_filters=NUM_CLASS,
                                    kernel_size=(1, 1, 1),
                                    l2_strength=L2_DECAY,
                                    bias=0.0,
                                    is_training=self.is_training)
        print('convn_shape:', logits_unflattened.shape.as_list())
        logits = flatten(logits_unflattened)
        print('fc_re_shape:', logits.shape.as_list())
        return logits
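
The helpers used above (conv3d, max_pool_3d, avg_pool_3d, dropout, flatten) come from the surrounding ShuffleNet-style codebase. The final flatten simply collapses the 1x1x1 class maps into [batch, NUM_CLASS] logits; a minimal sketch, assuming a statically known shape:

def flatten(x):
    # Collapse every dimension except the batch dimension,
    # e.g. [batch, 1, 1, 1, NUM_CLASS] -> [batch, NUM_CLASS].
    size = 1
    for dim in x.get_shape().as_list()[1:]:
        size *= dim
    return tf.reshape(x, [-1, size])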
Example #3
def conv_bn_relu_drop(x,
                      kernal,
                      phase,
                      drop,
                      image_z=None,
                      height=None,
                      width=None,
                      scope=None):
    """3-D conv -> group normalization -> ReLU -> dropout.

    kernal: filter shape [kd, kh, kw, in_channels, out_channels]
        (spelling follows the source library).
    phase: bool tensor, True during training.
    drop: keep probability for tf.nn.dropout.
    """
    with tf.name_scope(scope):
        W = weight_init(shape=kernal,
                        n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                        n_outputs=kernal[-1],
                        activefunction='relu',
                        variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv,
                                  is_train=phase,
                                  height=height,
                                  width=width,
                                  image_z=image_z,
                                  norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
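
A hypothetical call, assuming the helpers (weight_init, bias_variable, conv3d, normalizationlayer) are in scope; kernal is the filter shape [depth, height, width, in_channels, out_channels], and scope must be a string since it is concatenated into the variable names:

x = tf.placeholder(tf.float32, [None, 16, 96, 96, 1])
phase = tf.placeholder(tf.bool)      # True during training
drop = tf.placeholder(tf.float32)    # dropout keep probability
layer1 = conv_bn_relu_drop(x, kernal=(3, 3, 3, 1, 32),
                           phase=phase, drop=drop, scope='layer1')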
Example #4
def conv_sigmod(x, kernal, scope=None):
    """3-D conv followed by a sigmoid, typically used as the output head."""
    with tf.name_scope(scope):
        W = weight_init(shape=kernal,
                        n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                        n_outputs=kernal[-1],
                        activefunction='sigomd',  # (sic) key expected by the weight_init helper
                        variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
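
A hypothetical call continuing the previous example: a 1x1x1 convolution mapping the 32-channel feature block to a per-voxel probability map:

prob_map = conv_sigmod(layer1, kernal=(1, 1, 1, 32, 1), scope='output')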
Example #5
def IPN(x,
        PLM_NUM=5,
        filter_size=[3, 3, 3],
        LAYER_NUM=3,
        NUM_OF_CLASS=2,
        pooling_size=[]):
    """
    :param x: input tensor,shape[?,nx,ny,nz,channels]
    :param filter_size: size of the convolution filer
    :param PLM_NUM: number of PLM
    :param LAYER_NUM: number of conv layers in each PLM
    :return:
    """
    # Initialize variables.
    # Use the supplied pooling sizes, or derive them automatically from the input size.
    if not pooling_size:
        pooling_size = utils.cal_downsampling_size_combine(x.shape[1], PLM_NUM)
    else:
        PLM_NUM = len(pooling_size)

    W = [[0] * LAYER_NUM for _ in range(PLM_NUM)]
    b = [[0] * LAYER_NUM for _ in range(PLM_NUM)]
    conv = [[0] * LAYER_NUM for _ in range(PLM_NUM)]
    pool = [[0] * LAYER_NUM for _ in range(PLM_NUM)]
    variables = []

    #features = utils.cal_channel_num(PLM_NUM)
    features = np.ones(PLM_NUM, dtype='int32') * 64
    ##################### print model parameters #############
    print('')
    print('-----------------  model paras ------------------')
    resize = 1
    for index in pooling_size:
        resize *= index
    print('PLM DS SIZE: {}->{} = {}'.format(
        x.shape[1], resize, 'x'.join(str(s) for s in pooling_size)))
    print('conv channel nums : ' + ', '.join(str(f) for f in features))
    print('---------------------  end ----------------------')
    print('')
    ######################################################

    features_count = -1
    stddev = 0.02

    # Build the projection learning modules (PLMs): each PLM applies
    # LAYER_NUM conv+ReLU layers, then one unidirectional pooling step.
    for PLM in range(PLM_NUM):
        features_count += 1
        if PLM == 0:
            net = x
        else:
            net = pool[PLM - 1]
        for LAYER in range(LAYER_NUM):
            b[PLM][LAYER] = bias_variable([features[features_count]],
                                          name="b{}_{}".format(PLM + 1, LAYER + 1))
            in_channels = net.get_shape().as_list()[-1]
            W[PLM][LAYER] = weight_variable(
                filter_size + [in_channels, features[features_count]],
                stddev,
                name="w{}_{}".format(PLM + 1, LAYER + 1))
            variables.append(W[PLM][LAYER])
            variables.append(b[PLM][LAYER])
            conv[PLM][LAYER] = tf.nn.relu(
                conv3d(net, W[PLM][LAYER], b[PLM][LAYER]))
            if LAYER == LAYER_NUM - 1:
                # Unidirectional pooling after the last conv layer of the PLM.
                pool[PLM] = Unidirectional_pool(conv[PLM][LAYER],
                                                pooling_size[PLM])
            else:
                net = conv[PLM][LAYER]

    # Output map: conv to NUM_OF_CLASS channels, then softmax over classes.
    Wop = weight_variable(filter_size +
                          [features[features_count], NUM_OF_CLASS],
                          stddev,
                          name="w_output")
    bop = bias_variable([NUM_OF_CLASS], name="b_output")
    output = tf.nn.relu(
        tf.nn.bias_add(
            tf.nn.conv3d(pool[PLM_NUM - 1],
                         Wop,
                         strides=[1, 1, 1, 1, 1],
                         padding="SAME"), bop))

    sf = tf.nn.softmax(output)
    pred = tf.argmax(sf, axis=-1, name="prediction")
    return output, pred, variables, sf
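
A hypothetical usage sketch, assuming the repo helpers (weight_variable, bias_variable, conv3d, Unidirectional_pool, utils.cal_downsampling_size_combine) are importable; the unidirectional pooling steps project the first spatial axis down to size 1:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100, 100, 100, 1])  # [batch, nx, ny, nz, channels]
output, pred, variables, sf = IPN(x, NUM_OF_CLASS=2)
# output: per-voxel logits, pred: argmax class map, sf: softmax probabilities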