Example #1
def gatingsignal2d(x, kernal, phase, height=None, width=None, scope=None):
    """this is simply 1x1x1 convolution, bn, activation,Gating Signal(Query)
    :param x:
    :param kernal:(1,1,1,inputfilters,outputfilters)
    :param phase:
    :param drop:
    :param image_z:
    :param height:
    :param width:
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[2],
                               n_outputs=kernal[-1],
                               activefunction='relu',
                               variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv2d(x, W) + B
        conv = normalizationlayer(conv,
                                  is_train=phase,
                                  height=height,
                                  width=width,
                                  norm_type='group',
                                  scope=scope)
        conv = tf.nn.relu(conv)
        return conv
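For orientation, a minimal usage sketch for the gating signal above. The placeholder names and the 32x32x128 decoder feature map are illustrative assumptions, and the helper layers used inside (weight_xavier_init, conv2d, bias_variable, normalizationlayer) are assumed to be importable from the same project.

import tensorflow as tf

# Illustrative decoder feature map: 128 channels at 32x32 resolution
decoder_feat = tf.placeholder(tf.float32, [None, 32, 32, 128])
is_training = tf.placeholder(tf.bool)

# 1x1 convolution + group norm + ReLU reduces 128 channels to 64,
# producing the gating signal (query) fed to an attention block
gating = gatingsignal2d(decoder_feat, (1, 1, 128, 64), is_training,
                        height=32, width=32, scope='gating1')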
def conv_sigmod(x, kernal, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
def deconv_relu(x, kernal, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1],
                               n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        conv = deconv3d(x, W, samefeture, True) + B
        conv = tf.nn.relu(conv)
        return conv
def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
Example #5
def deconv_relu_drop(x, kernalshape, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] *
                               kernalshape[-1],
                               n_outputs=kernalshape[-2],
                               activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-2]], variable_name=str(scope) + 'B')
        dconv = tf.nn.relu(deconv2d(x, W, samefeature=samefeture) + B)
        return dconv
Example #6
def conv_relu(x, kernalshape, scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] *
                               kernalshape[2],
                               n_outputs=kernalshape[-1],
                               activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W) + B
        conv = tf.nn.relu(conv)
        return conv
Example #7
    def __init__(self, number_of_classes=2, training_factor=.3, width=32):
        self.width = width
        self.emb_in = tf.placeholder(tf.float32, [None, self.width],
                                     name="emb_in")
        self.category_in = tf.placeholder(tf.float32,
                                          [None, number_of_classes],
                                          name="category")
        self.dropout = tf.placeholder(tf.float32)

        with tf.variable_scope("layer0"):
            self.W0 = layer.weight_variable([self.width, self.width],
                                            name=("layer0_weight"))
            self.b0 = layer.bias_variable([self.width], name=("layer0_bias"))
            self.layer0 = tf.add(tf.matmul(self.emb_in, self.W0), self.b0)
            self.layer0 = tf.tanh(self.layer0)
            self.layer0 = tf.nn.dropout(self.layer0, self.dropout)

        with tf.variable_scope("layer1"):
            self.W1 = layer.weight_variable([self.width, number_of_classes],
                                            name=("layer1_weight"))
            self.b1 = layer.bias_variable([number_of_classes],
                                          name=("layer1_bias"))
            self.layer1 = tf.add(tf.matmul(self.layer0, self.W1), self.b1)

        self.category_out = tf.nn.softmax(self.layer1)

        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.category_in,
                                                    logits=self.layer1))
        self.train = tf.train.AdamOptimizer(training_factor).minimize(
            self.loss)

        self.error = 1.0 - tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(self.category_in, 1),
                             tf.argmax(self.layer1, 1)),
                    dtype=tf.float32))
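A minimal feed/run sketch for the classifier defined by this __init__. EmbClassifier is a hypothetical stand-in for whatever class the constructor belongs to, and the random data is illustrative only; the third placeholder is the keep probability passed to tf.nn.dropout.

import numpy as np
import tensorflow as tf

model = EmbClassifier(number_of_classes=2, training_factor=.3, width=32)

emb_batch = np.random.rand(8, 32).astype(np.float32)      # 8 embeddings of width 32
label_batch = np.eye(2)[np.random.randint(0, 2, size=8)]  # one-hot labels

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss, err = sess.run(
        [model.train, model.loss, model.error],
        feed_dict={model.emb_in: emb_batch,
                   model.category_in: label_batch,
                   model.dropout: 0.5})  # keep probability for tf.nn.dropout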
Example #8
def conv_bn_relu_drop(x,
                      kernalshape,
                      phase,
                      drop_conv,
                      height=None,
                      width=None,
                      scope=None):
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernalshape,
                               n_inputs=kernalshape[0] * kernalshape[1] *
                               kernalshape[2],
                               n_outputs=kernalshape[-1],
                               activefunction='relu',
                               variable_name=str(scope) + 'W')
        B = bias_variable([kernalshape[-1]], variable_name=str(scope) + 'B')
        conv = conv2d(x, W) + B
        conv = normalizationlayer(conv,
                                  phase,
                                  height=height,
                                  width=width,
                                  norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop_conv)
        return conv
Example #9
def attngatingblock(x,
                    g,
                    inputfilters,
                    outfilters,
                    scale_factor,
                    phase,
                    height=None,
                    width=None,
                    scope=None):
    """
    take g which is the spatially smaller signal, do a conv to get the same number of feature channels as x (bigger spatially)
    do a conv on x to also get same feature channels (theta_x)
    then, upsample g to be same size as x add x and g (concat_xg) relu, 1x1x1 conv, then sigmoid then upsample the final -
    this gives us attn coefficients
    :param x:
    :param g:
    :param inputfilters:
    :param outfilters:
    :param scale_factor:2
    :param scope:
    :return:
    """
    with tf.name_scope(scope):
        kernalx = (1, 1, inputfilters, outfilters)
        Wx = weight_xavier_init(shape=kernalx,
                                n_inputs=kernalx[0] * kernalx[1] * kernalx[2],
                                n_outputs=kernalx[-1],
                                activefunction='relu',
                                variable_name=scope + 'conv_Wx')
        Bx = bias_variable([kernalx[-1]], variable_name=scope + 'conv_Bx')
        theta_x = conv2d(x, Wx, scale_factor) + Bx
        kernalg = (1, 1, inputfilters, outfilters)
        Wg = weight_xavier_init(shape=kernalg,
                                n_inputs=kernalg[0] * kernalg[1] * kernalg[2],
                                n_outputs=kernalg[-1],
                                activefunction='relu',
                                variable_name=scope + 'conv_Wg')
        Bg = bias_variable([kernalg[-1]], variable_name=scope + 'conv_Bg')
        phi_g = conv2d(g, Wg) + Bg

        add_xg = resnet_Add(theta_x, phi_g)
        act_xg = tf.nn.relu(add_xg)

        kernalpsi = (1, 1, outfilters, 1)
        Wpsi = weight_xavier_init(shape=kernalpsi,
                                  n_inputs=kernalpsi[0] * kernalpsi[1] *
                                  kernalpsi[2],
                                  n_outputs=kernalpsi[-1],
                                  activefunction='relu',
                                  variable_name=scope + 'conv_Wpsi')
        Bpsi = bias_variable([kernalpsi[-1]],
                             variable_name=scope + 'conv_Bpsi')
        psi = conv2d(act_xg, Wpsi) + Bpsi
        sigmoid_psi = tf.nn.sigmoid(psi)

        upsample_psi = upsample2d(sigmoid_psi,
                                  scale_factor=scale_factor,
                                  scope=scope + "resampler")

        # Attention: upsample_psi * x
        # upsample_psi = layers.Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=4),
        #                              arguments={'repnum': outfilters})(upsample_psi)
        gat_x = tf.multiply(upsample_psi, x)
        kernal_gat_x = (1, 1, outfilters, outfilters)
        Wgatx = weight_xavier_init(shape=kernal_gat_x,
                                   n_inputs=kernal_gat_x[0] * kernal_gat_x[1] *
                                   kernal_gat_x[2],
                                   n_outputs=kernal_gat_x[-1],
                                   activefunction='relu',
                                   variable_name=scope + 'conv_Wgatx')
        Bgatx = bias_variable([kernal_gat_x[-1]],
                              variable_name=scope + 'conv_Bgatx')
        gat_x_out = conv2d(gat_x, Wgatx) + Bgatx
        gat_x_out = normalizationlayer(gat_x_out,
                                       is_train=phase,
                                       height=height,
                                       width=width,
                                       norm_type='group',
                                       scope=scope)
    return gat_x_out
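A minimal sketch of how the gating signal and the attention block above are typically wired together in a decoder step. The shapes, placeholder names, and scale_factor=2 are illustrative assumptions; the helper layers used inside both functions are assumed to be importable from the same project.

import tensorflow as tf

skip_feat = tf.placeholder(tf.float32, [None, 64, 64, 64])      # encoder skip connection
decoder_feat = tf.placeholder(tf.float32, [None, 32, 32, 128])  # coarser decoder features
is_training = tf.placeholder(tf.bool)

# 1x1 conv + group norm + ReLU turns the decoder features into the gating signal
g = gatingsignal2d(decoder_feat, (1, 1, 128, 64), is_training,
                   height=32, width=32, scope='gating1')

# attention coefficients are computed at the gating resolution, upsampled by
# scale_factor=2, and used to reweight the skip connection
attn_feat = attngatingblock(skip_feat, g, inputfilters=64, outfilters=64,
                            scale_factor=2, phase=is_training,
                            height=64, width=64, scope='attn1')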
Example #10
def create_conv_net(x,
                    keep_prob,
                    channels,
                    layers,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    training=True):
    """
    주어진 파라미터를 이용해서 convolution u-net 그래프 생성 함 
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")

            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(
                tf.layers.batch_normalization(conv1, training=training))
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(
                tf.layers.batch_normalization(conv2, training=training))

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))

            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(
                tf.layers.batch_normalization(conv1, training=training))
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(
                tf.layers.batch_normalization(conv2, training=training))
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
            size -= 4

    # Output Map
    with tf.name_scope("output_map"):
        weight = weight_variable(
            [1, 1, features_root, 1],
            stddev)  # only the defect-class map enters the loss, so the output has a single channel
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.squeeze(tf.nn.sigmoid(conv), axis=-1)
        up_h_convs["out"] = output_map

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
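A minimal invocation sketch (TF 1.x style) for the builder above. The input resolution and hyperparameters are illustrative assumptions; the weight/bias helpers used inside the function are assumed to be importable from the same project.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 572, 572, 1], name="x")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")

# builds the full down/up graph and returns the sigmoid output map,
# the list of trainable weights/biases, and the int(in_size - size)
# bookkeeping value tracked inside the function
output_map, variables, offset = create_conv_net(
    x, keep_prob, channels=1, layers=3, features_root=16,
    filter_size=3, pool_size=2, training=True)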