Code Example #1
import tensorflow as tf

# weight_xavier_init, bias_variable, conv3d, normalizationlayer, upsample3d and
# resnet_Add used below are helper layers defined elsewhere in the same project.


def down_sampling(x,
                  kernal,
                  phase,
                  drop,
                  image_z=None,
                  height=None,
                  width=None,
                  scope=None):
    """Downsampling block: strided 3D conv -> group norm -> ReLU -> dropout.
    :param x: 5-D input tensor (batch, depth, height, width, channels)
    :param kernal: kernel shape (k_z, k_h, k_w, in_channels, out_channels)
    :param phase: bool tensor, True while training (used by the norm layer)
    :param drop: dropout keep probability
    :param image_z: depth of the feature map, for the normalization layer
    :param height: height of the feature map, for the normalization layer
    :param width: width of the feature map, for the normalization layer
    :param scope: variable/name scope prefix
    :return: feature map downsampled by a factor of 2 in each spatial dimension
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[2] *
                               kernal[3],
                               n_outputs=kernal[-1],
                               activefunction='relu',
                               variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W, 2) + B
        conv = normalizationlayer(conv,
                                  is_train=phase,
                                  height=height,
                                  width=width,
                                  image_z=image_z,
                                  norm_type='group',
                                  G=20,
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
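
A minimal usage sketch for down_sampling follows; the placeholder shapes, channel counts (kept divisible by the G=20 groups used in normalizationlayer), and scope name are illustrative assumptions, not values from the source.

# Usage sketch (assumption: TensorFlow 1.x graph mode).
x_in = tf.placeholder(tf.float32, shape=(None, 16, 96, 96, 20))  # NDHWC volume
phase = tf.placeholder(tf.bool)      # True while training
drop = tf.placeholder(tf.float32)    # dropout keep probability
# The stride-2 conv halves every spatial dimension, so the normalization
# layer is assumed to see an 8 x 48 x 48 feature map with 40 channels.
down1 = down_sampling(x_in, kernal=(3, 3, 3, 20, 40), phase=phase, drop=drop,
                      image_z=8, height=48, width=48, scope='down1')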
Code Example #2
def attngatingblock(x, g, inputfilters, outfilters, scale_factor, phase, image_z=None, height=None, width=None,
                    scope=None):
    """
    take g which is the spatially smaller signal, do a conv to get the same number of feature channels as x (bigger spatially)
    do a conv on x to also get same feature channels (theta_x)
    then, upsample g to be same size as x add x and g (concat_xg) relu, 1x1x1 conv, then sigmoid then upsample the final -
    this gives us attn coefficients
    :param x:
    :param g:
    :param inputfilters:
    :param outfilters:
    :param scale_factor:2
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        kernalx = (1, 1, 1, inputfilters, outfilters)
        Wx = weight_xavier_init(shape=kernalx, n_inputs=kernalx[0] * kernalx[1] * kernalx[2] * kernalx[3],
                                n_outputs=kernalx[-1], activefunction='relu', variable_name=scope + 'conv_Wx')
        Bx = bias_variable([kernalx[-1]], variable_name=scope + 'conv_Bx')
        theta_x = conv3d(x, Wx, scale_factor) + Bx
        kernalg = (1, 1, 1, inputfilters, outfilters)
        Wg = weight_xavier_init(shape=kernalg, n_inputs=kernalg[0] * kernalg[1] * kernalg[2] * kernalg[3],
                                n_outputs=kernalg[-1], activefunction='relu', variable_name=scope + 'conv_Wg')
        Bg = bias_variable([kernalg[-1]], variable_name=scope + 'conv_Bg')
        phi_g = conv3d(g, Wg) + Bg

        add_xg = resnet_Add(theta_x, phi_g)
        act_xg = tf.nn.relu(add_xg)

        kernalpsi = (1, 1, 1, outfilters, 1)
        Wpsi = weight_xavier_init(shape=kernalpsi, n_inputs=kernalpsi[0] * kernalpsi[1] * kernalpsi[2] * kernalpsi[3],
                                  n_outputs=kernalpsi[-1], activefunction='relu', variable_name=scope + 'conv_Wpsi')
        Bpsi = bias_variable([kernalpsi[-1]], variable_name=scope + 'conv_Bpsi')
        psi = conv3d(act_xg, Wpsi) + Bpsi
        sigmoid_psi = tf.nn.sigmoid(psi)

        upsample_psi = upsample3d(sigmoid_psi, scale_factor=scale_factor, scope=scope + "resampler")

        # Attention: upsample_psi * x
        # upsample_psi = layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=4),
        #                              arguments={'repnum': outfilters})(upsample_psi)
        gat_x = tf.multiply(upsample_psi, x)
        kernal_gat_x = (1, 1, 1, outfilters, outfilters)
        Wgatx = weight_xavier_init(shape=kernal_gat_x,
                                   n_inputs=kernal_gat_x[0] * kernal_gat_x[1] * kernal_gat_x[2] * kernal_gat_x[3],
                                   n_outputs=kernal_gat_x[-1], activefunction='relu',
                                   variable_name=scope + 'conv_Wgatx')
        Bgatx = bias_variable([kernal_gat_x[-1]], variable_name=scope + 'conv_Bgatx')
        gat_x_out = conv3d(gat_x, Wgatx) + Bgatx
        gat_x_out = normalizationlayer(gat_x_out, is_train=phase, height=height, width=width, image_z=image_z,
                                       norm_type='group', scope=scope)
    return gat_x_out
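
To show how this block is meant to be called, here is a hypothetical wiring of one attention-gated skip connection; the tensors, shapes, and scope name are assumptions (in practice g would come from a gating-signal block such as the one in Code Example #4).

# Hypothetical call; all names and shapes are assumptions.
phase = tf.placeholder(tf.bool)
skip_feat = tf.placeholder(tf.float32, shape=(None, 16, 96, 96, 40))  # encoder features
g_signal = tf.placeholder(tf.float32, shape=(None, 8, 48, 48, 40))    # coarser gating signal
# scale_factor=2 because g_signal has half the spatial resolution of skip_feat
att_feat = attngatingblock(skip_feat, g_signal, inputfilters=40, outfilters=40,
                           scale_factor=2, phase=phase,
                           image_z=16, height=96, width=96, scope='attn1')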
Code Example #3
def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    """
    :param x:
    :param kernal:
    :param phase:
    :param drop:
    :param image_z:
    :param height:
    :param width:
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
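
A usage sketch, with the same caveat that the shapes and names are illustrative assumptions:

x_in = tf.placeholder(tf.float32, shape=(None, 16, 96, 96, 20))
phase = tf.placeholder(tf.bool)
drop = tf.placeholder(tf.float32)
# stride-1 conv keeps the spatial size, so image_z/height/width match the input
conv1 = conv_bn_relu_drop(x_in, kernal=(3, 3, 3, 20, 20), phase=phase, drop=drop,
                          image_z=16, height=96, width=96, scope='conv1')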
Code Example #4
def gatingsignal3d(x, kernal, phase, image_z=None, height=None, width=None, scope=None):
    """this is simply 1x1x1 convolution, bn, activation,Gating Signal(Query)
    :param x:
    :param kernal:(1,1,1,inputfilters,outputfilters)
    :param phase:
    :param drop:
    :param image_z:
    :param height:
    :param width:
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  scope=scope)
        conv = tf.nn.relu(conv)
        return conv
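
Putting Code Examples #2 and #4 together, a typical Attention U-Net decoder step first derives the gating signal from the coarser decoder features and then uses it to gate the matching encoder skip connection. The shapes and scope names below are assumptions for illustration:

decoder_feat = tf.placeholder(tf.float32, shape=(None, 8, 48, 48, 80))
skip_feat = tf.placeholder(tf.float32, shape=(None, 16, 96, 96, 40))
phase = tf.placeholder(tf.bool)
# 1x1x1 gating signal: reduce decoder channels to match the skip connection
g = gatingsignal3d(decoder_feat, kernal=(1, 1, 1, 80, 40), phase=phase,
                   image_z=8, height=48, width=48, scope='gating1')
# attention-gate the skip connection with that signal (scale_factor=2 because
# the gating signal has half the spatial resolution of skip_feat)
att = attngatingblock(skip_feat, g, inputfilters=40, outfilters=40,
                      scale_factor=2, phase=phase,
                      image_z=16, height=96, width=96, scope='attn1')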