import tensorflow as tf

# The helper functions used below (weight_xavier_init, bias_variable, conv2d, conv3d,
# deconv2d, deconv3d, upsample2d, resnet_Add, normalizationlayer) are assumed to come
# from the project's layer utilities.


def gatingsignal2d(x, kernal, phase, height=None, width=None, scope=None):
    """Gating signal (query): 1x1 convolution, group normalization, ReLU activation.
    :param x: input feature map
    :param kernal: (1, 1, inputfilters, outputfilters)
    :param phase: training-phase flag for the normalization layer
    :param height:
    :param width:
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2],
                               n_outputs=kernal[-1], activefunction='relu',
                               variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv2d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width,
                                  norm_type='group', scope=scope)
        conv = tf.nn.relu(conv)
        return conv
def conv_sigmod(x, kernal, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='sigomd',
                               variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
def deconv_relu(x, kernal, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
                               n_outputs=kernal[-2], activefunction='relu',
                               variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        conv = deconv3d(x, W, samefeture, True) + B
        conv = tf.nn.relu(conv)
        return conv
def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    """3D convolution, group normalization, ReLU and dropout."""
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu',
                               variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width,
                                  image_z=image_z, norm_type='group', scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
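
# Illustrative sketch (not taken from the original model code): one way the 3D blocks
# above could be chained into a small encoder/decoder step ending in a sigmoid output.
# The filter counts, kernel sizes and names used here are assumptions for this example only.

# Bind the 3D variant under a distinct name before it is shadowed by the 2D
# conv_bn_relu_drop defined further below (both definitions share the same name here).
_conv_bn_relu_drop_3d = conv_bn_relu_drop


def _example_3d_stage(inputs, phase, drop, scope='example3d'):
    # two 3x3x3 conv + group-norm + ReLU + dropout blocks: 1 -> 16 -> 16 channels
    conv1 = _conv_bn_relu_drop_3d(inputs, (3, 3, 3, 1, 16), phase, drop, scope=scope + 'conv1')
    conv1 = _conv_bn_relu_drop_3d(conv1, (3, 3, 3, 16, 16), phase, drop, scope=scope + 'conv2')
    # transposed 3D convolution (deconv3d kernel layout: depth, height, width, out, in): 16 -> 8 channels
    up = deconv_relu(conv1, (3, 3, 3, 8, 16), scope=scope + 'up')
    # 1x1x1 3D convolution + sigmoid producing a single-channel probability map
    out = conv_sigmod(up, (1, 1, 1, 8, 1), scope=scope + 'out')
    return out
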
def deconv_relu_drop(x, kernalshape, samefeture=False, scope=None):
    """2D transposed convolution followed by ReLU (no dropout is applied despite the name)."""
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[-1],
                               n_outputs=kernalshape[-2], activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-2]], variable_name=str(scope) + 'B')
        dconv = tf.nn.relu(deconv2d(x, W, samefeature=samefeture) + B)
        return dconv
def conv_relu(x, kernalshape, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                               n_outputs=kernalshape[-1], activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W) + B
        conv = tf.nn.relu(conv)
        return conv
def conv_bn_relu_drop(x, kernalshape, phase, drop_conv, height=None, width=None, scope=None):
    """2D counterpart of the 3D conv_bn_relu_drop above: convolution, group normalization, ReLU, dropout."""
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] * kernalshape[2],
                               n_outputs=kernalshape[-1], activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W) + B
        conv = normalizationlayer(conv, phase, height=height, width=width,
                                  norm_type='group', scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop_conv)
        return conv
def attngatingblock(x, g, inputfilters, outfilters, scale_factor, phase, height=None, width=None, scope=None):
    """Attention gating block.
    Take g, the spatially smaller gating signal, and convolve it to get the same number of
    feature channels as x (the spatially larger skip feature); convolve x with stride
    scale_factor to match g's spatial size (theta_x); add theta_x and phi_g, apply ReLU,
    a 1x1 convolution and a sigmoid, then upsample the result back to x's size - this
    gives the attention coefficients that are multiplied onto x.
    :param x: skip-connection feature map (larger spatial size)
    :param g: gating signal (smaller spatial size)
    :param inputfilters: number of channels in x
    :param outfilters: number of intermediate attention channels
    :param scale_factor: spatial ratio between x and g, e.g. 2
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        kernalx = (1, 1, inputfilters, outfilters)
        Wx = weight_xavier_init(shape=kernalx, n_inputs=kernalx[0] * kernalx[1] * kernalx[2],
                                n_outputs=kernalx[-1], activefunction='relu',
                                variable_name=scope + 'conv_Wx')
        Bx = bias_variable([kernalx[-1]], variable_name=scope + 'conv_Bx')
        # strided 1x1 convolution brings x down to g's spatial size
        theta_x = conv2d(x, Wx, scale_factor) + Bx

        kernalg = (1, 1, inputfilters, outfilters)
        Wg = weight_xavier_init(shape=kernalg, n_inputs=kernalg[0] * kernalg[1] * kernalg[2],
                                n_outputs=kernalg[-1], activefunction='relu',
                                variable_name=scope + 'conv_Wg')
        Bg = bias_variable([kernalg[-1]], variable_name=scope + 'conv_Bg')
        phi_g = conv2d(g, Wg) + Bg

        add_xg = resnet_Add(theta_x, phi_g)
        act_xg = tf.nn.relu(add_xg)

        kernalpsi = (1, 1, outfilters, 1)
        Wpsi = weight_xavier_init(shape=kernalpsi, n_inputs=kernalpsi[0] * kernalpsi[1] * kernalpsi[2],
                                  n_outputs=kernalpsi[-1], activefunction='relu',
                                  variable_name=scope + 'conv_Wpsi')
        Bpsi = bias_variable([kernalpsi[-1]], variable_name=scope + 'conv_Bpsi')
        psi = conv2d(act_xg, Wpsi) + Bpsi
        sigmoid_psi = tf.nn.sigmoid(psi)
        # upsample the single-channel attention map back to x's spatial size
        upsample_psi = upsample2d(sigmoid_psi, scale_factor=scale_factor, scope=scope + "resampler")

        # Attention: upsample_psi * x
        # upsample_psi = layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=4),
        #                              arguments={'repnum': outfilters})(upsample_psi)
        gat_x = tf.multiply(upsample_psi, x)

        # final 1x1 convolution on the gated feature map; this kernel shape assumes
        # inputfilters == outfilters, since gat_x has inputfilters channels
        kernal_gat_x = (1, 1, outfilters, outfilters)
        Wgatx = weight_xavier_init(shape=kernal_gat_x,
                                   n_inputs=kernal_gat_x[0] * kernal_gat_x[1] * kernal_gat_x[2],
                                   n_outputs=kernal_gat_x[-1], activefunction='relu',
                                   variable_name=scope + 'conv_Wgatx')
        Bgatx = bias_variable([kernal_gat_x[-1]], variable_name=scope + 'conv_Bgatx')
        gat_x_out = conv2d(gat_x, Wgatx) + Bgatx

        gat_x_out = normalizationlayer(gat_x_out, is_train=phase, height=height, width=width,
                                       norm_type='group', scope=scope)
        return gat_x_out
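
# Illustrative sketch (not taken from the original model code): how the 2D attention
# pieces above are typically wired into a decoder skip connection in an Attention U-Net
# style network. The channel counts, scale factor and names here are assumptions for
# this example only, and conv_bn_relu_drop below refers to the 2D variant defined above.
def _example_attention_skip(skip, decoder_feat, phase, drop, scope='exampleatt'):
    # gating signal (query) from the coarser decoder feature map: 64 -> 32 channels
    g = gatingsignal2d(decoder_feat, (1, 1, 64, 32), phase, scope=scope + 'gating')
    # attention coefficients computed against the encoder skip feature (32 channels),
    # which is spatially scale_factor times larger than the decoder feature
    att = attngatingblock(skip, g, 32, 32, scale_factor=2, phase=phase, scope=scope + 'att')
    # upsample the decoder feature to the skip resolution: 64 -> 32 channels
    up = deconv_relu_drop(decoder_feat, (3, 3, 32, 64), scope=scope + 'up')
    # concatenate the attended skip with the upsampled decoder feature and fuse
    merged = tf.concat([att, up], axis=-1)
    fused = conv_bn_relu_drop(merged, (3, 3, 64, 32), phase, drop, scope=scope + 'fuse')
    return fused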