Example #1

    def convolve(self, image, training, keep_prob):
        result = layer.batch_normalization(image, training)
        result = layer.conv_relu(result, 1, 18, width=5, padding="VALID")
        result = layer.max_pool(result)  # 12
        result = layer.conv_relu(result, 18, 24, width=5, padding="VALID")
        result = layer.max_pool(result)  # 4
        result = tf.nn.dropout(result, keep_prob)
        return layer.conv(result, 24, 10, width=4, padding="VALID")
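All of these convolve variants call into a small `layer` helper module that the page does not show. As a minimal sketch only, with every name and signature inferred from the call sites rather than taken from the original module, the helpers could look like this in TensorFlow 1.x:

    # Hypothetical reconstruction of the `layer` helpers used by the
    # convolve() examples; signatures are inferred from the call sites.
    import tensorflow as tf

    def batch_normalization(x, training):
        return tf.layers.batch_normalization(x, training=training)

    def conv(x, in_channels, out_channels, width, padding="SAME"):
        w = tf.Variable(
            tf.truncated_normal([width, width, in_channels, out_channels],
                                stddev=0.1))
        b = tf.Variable(tf.zeros([out_channels]))
        return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=padding) + b

    def conv_relu(x, in_channels, out_channels, width, padding="SAME"):
        return tf.nn.relu(conv(x, in_channels, out_channels, width, padding))

    def max_pool(x):
        # 2x2 pooling with stride 2 halves each spatial dimension, which is
        # what the trailing size comments (# 12, # 14, ...) track.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding="SAME")

    def drop_conv(keep_prob, x, in_channels, out_channels, width,
                  padding="SAME"):
        # Dropout applied to the convolution input.
        return conv(tf.nn.dropout(x, keep_prob), in_channels, out_channels,
                    width, padding)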
Example #2

    def convolve(self, image, training, keep_prob):
        result = layer.batch_normalization(image, training)
        result = layer.conv_relu(result, 1, 18, width=5)
        result = layer.max_pool(result)  # 14
        result = tf.nn.relu(
            layer.drop_conv(keep_prob, result, 18, 24, width=5))
        result = layer.max_pool(result)  # 7
        result = tf.nn.relu(
            layer.drop_conv(keep_prob, result, 24, 32, width=5,
                            padding="VALID"))
        return layer.conv(result, 32, 10, width=3, padding="VALID")
Example #3

    def convolve(self, image, training, keep_prob):
        result = layer.batch_normalization(image, training)
        result = layer.conv_relu(result, 1, 18, width=5)
        result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
        result = layer.max_pool(result)  # 14
        result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
        result = layer.conv_relu(result, 18, 24, width=5)
        result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
        result = layer.max_pool(result)  # 7
        result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
        result = layer.conv_relu(result, 24, 32, width=5, padding="VALID")
        result = layer.resnet_block(result, 32, 3, training, momentum=0.99)
        result = tf.nn.dropout(result, keep_prob)
        return layer.conv(result, 32, 10, width=3, padding="VALID")
Example #4

    def convolve(self, image, training, keep_prob):
        result = layer.batch_normalization(image, training)
        result = layer.conv_relu(result, 1, 18, width=5)
        result = layer.max_pool(result)  # 14
        result = layer.resnet_block(result, 18, 3, training)
        result = layer.conv_relu(result, 18, 24, width=3)
        result = layer.max_pool(result)  # 7
        result = layer.resnet_block(result, 24, 3, training)
        result = layer.resnet_block(result, 24, 3, training)
        return layer.drop_conv(keep_prob, result, 24, 10,
                               width=7, padding="VALID")
Example #5

    def convolve(self, image, training, keep_prob):
        layers = [1, 32, 64]
        width = 28
        conv_window = 3
        feature_layer_size = 128  # maybe 1024
        result = image

        for index in range(len(layers) - 1):
            result = layer.conv_relu(result, layers[index], layers[index + 1],
                                     conv_window)
            result = layer.resnet_block(result,
                                        layers=layers[index + 1],
                                        width=conv_window,
                                        training=training)
            result = layer.resnet_block(result,
                                        layers=layers[index + 1],
                                        width=conv_window,
                                        training=training)
            result = layer.max_pool(result)
            width = int(round(width / 2.0))

        result = layer.conv_relu(result,
                                 layers[-1],
                                 feature_layer_size,
                                 width=width,
                                 padding='VALID')

        h_out = tf.reshape(result, [-1, feature_layer_size])
        h_out_drop = tf.nn.dropout(h_out, keep_prob)
        y = layer.fully_connected(h_out_drop, feature_layer_size, 10)

        return y
Example #6
    def forward(self, input_images):
        input_images = tf.reshape(input_images, [-1, 28, 28, 1])
        with tf.variable_scope('conv2d_1'):
            # 3x3 kernel, 1 input channel, 32 output channels (filter count).
            W1 = tf.Variable(tf.random_normal([3, 3, 1, 32]), name='w1')
            L1 = layer.conv2d(input_images, W1, strides=1)
            L1 = layer.max_pool(L1, ksize=2, strides=2)


        with tf.variable_scope('conv2d_2'):
            # Second conv layer; input feature maps have shape (?, 14, 14, 32).
            # 3x3 kernel, 32 input channels, 64 output channels.
            W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01),
                             name='w2')
            L2 = layer.conv2d(L1, W2, strides=1)
            L2 = layer.max_pool(L2, ksize=2, strides=2)


        with tf.variable_scope('fc'):
            logits = layer.fully_connected(L2, 10)
        return logits
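A minimal way to wire this forward() into a training graph, using standard TF1 boilerplate (`net` and the learning rate are illustrative, not part of the example):

    # Illustrative training setup around forward(); `net` is assumed to be
    # an instance of the class above.
    x = tf.placeholder(tf.float32, [None, 784])
    y_true = tf.placeholder(tf.float32, [None, 10])

    logits = net.forward(x)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true,
                                                   logits=logits))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)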
Example #7

    def convolve(self, image, training, keep_prob):
        result = layer.batch_normalization(image, training)
        result = layer.conv_relu(result, 1, 18, width=5, padding="VALID")
        result = layer.max_pool(result)  # 12
        result = layer.resnet_block(result, 18, 3, training)
        result = layer.resnet_block(result, 18, 3, training)
        result = layer.max_pool(result)  # 6
        result = layer.conv_relu(result, 18, 24, width=1)
        result = layer.resnet_narrow(result, 24, 3, training)
        result = layer.resnet_narrow(result, 24, 3, training)
        result = layer.max_pool(result)  # 3
        result = layer.conv_relu(result, 24, 32, width=1)
        result = layer.resnet_narrow(result, 32, 3, training)
        result = layer.resnet_narrow(result, 32, 3, training)
        return layer.drop_conv(keep_prob, result, 32, 10,
                               width=3, padding="VALID")
Example #8
    def forward_unet(self, inp, weights, is_training=True):

        self.conv11 = conv_block(inp,
                                 weights['conv11_weights'],
                                 weights['conv11_biases'],
                                 scope='conv1/bn1',
                                 bn=False,
                                 is_training=is_training)
        self.conv12 = conv_block(self.conv11,
                                 weights['conv12_weights'],
                                 weights['conv12_biases'],
                                 scope='conv1/bn2',
                                 is_training=is_training)
        self.pool11 = max_pool(self.conv12, 2, 2, 2, 2, padding='VALID')
        # 192x192x16
        self.conv21 = conv_block(self.pool11,
                                 weights['conv21_weights'],
                                 weights['conv21_biases'],
                                 scope='conv2/bn1',
                                 is_training=is_training)
        self.conv22 = conv_block(self.conv21,
                                 weights['conv22_weights'],
                                 weights['conv22_biases'],
                                 scope='conv2/bn2',
                                 is_training=is_training)
        self.pool21 = max_pool(self.conv22, 2, 2, 2, 2, padding='VALID')
        # 96x96x32
        self.conv31 = conv_block(self.pool21,
                                 weights['conv31_weights'],
                                 weights['conv31_biases'],
                                 scope='conv3/bn1',
                                 is_training=is_training)
        self.conv32 = conv_block(self.conv31,
                                 weights['conv32_weights'],
                                 weights['conv32_biases'],
                                 scope='conv3/bn2',
                                 is_training=is_training)
        self.pool31 = max_pool(self.conv32, 2, 2, 2, 2, padding='VALID')
        # 48x48x64
        self.conv41 = conv_block(self.pool31,
                                 weights['conv41_weights'],
                                 weights['conv41_biases'],
                                 scope='conv4/bn1',
                                 is_training=is_training)
        self.conv42 = conv_block(self.conv41,
                                 weights['conv42_weights'],
                                 weights['conv42_biases'],
                                 scope='conv4/bn2',
                                 is_training=is_training)
        self.pool41 = max_pool(self.conv42, 2, 2, 2, 2, padding='VALID')
        # 24x24x128
        self.conv51 = conv_block(self.pool41,
                                 weights['conv51_weights'],
                                 weights['conv51_biases'],
                                 scope='conv5/bn1',
                                 is_training=is_training)
        self.conv52 = conv_block(self.conv51,
                                 weights['conv52_weights'],
                                 weights['conv52_biases'],
                                 scope='conv5/bn2',
                                 is_training=is_training)
        # 24x24x256

        ## Upsampling path: each deconv doubles the resolution and halves the
        ## channel count.
        self.deconv6 = deconv_block(self.conv52,
                                    weights['deconv6_weights'],
                                    weights['deconv6_biases'],
                                    scope='deconv/bn6',
                                    is_training=is_training)
        # 48x48x128
        self.sum6 = concat2d(self.deconv6, self.conv42)  # encoder skip connection
        self.conv61 = conv_block(self.sum6,
                                 weights['conv61_weights'],
                                 weights['conv61_biases'],
                                 scope='conv6/bn1',
                                 is_training=is_training)
        self.conv62 = conv_block(self.conv61,
                                 weights['conv62_weights'],
                                 weights['conv62_biases'],
                                 scope='conv6/bn2',
                                 is_training=is_training)
        # 48x48x128

        self.deconv7 = deconv_block(self.conv62,
                                    weights['deconv7_weights'],
                                    weights['deconv7_biases'],
                                    scope='deconv/bn7',
                                    is_training=is_training)
        # 96x96x64
        self.sum7 = concat2d(self.deconv7, self.conv32)  # encoder skip connection
        self.conv71 = conv_block(self.sum7,
                                 weights['conv71_weights'],
                                 weights['conv71_biases'],
                                 scope='conv7/bn1',
                                 is_training=is_training)
        self.conv72 = conv_block(self.conv71,
                                 weights['conv72_weights'],
                                 weights['conv72_biases'],
                                 scope='conv7/bn2',
                                 is_training=is_training)
        # 96x96x64

        self.deconv8 = deconv_block(self.conv72,
                                    weights['deconv8_weights'],
                                    weights['deconv8_biases'],
                                    scope='deconv/bn8',
                                    is_training=is_training)
        # 192x192x32
        self.sum8 = concat2d(self.deconv8, self.conv22)  # encoder skip connection
        self.conv81 = conv_block(self.sum8,
                                 weights['conv81_weights'],
                                 weights['conv81_biases'],
                                 scope='conv8/bn1',
                                 is_training=is_training)
        self.conv82 = conv_block(self.conv81,
                                 weights['conv82_weights'],
                                 weights['conv82_biases'],
                                 scope='conv8/bn2',
                                 is_training=is_training)
        self.conv82_resize = tf.image.resize_images(
            self.conv82, [384, 384],
            method=tf.image.ResizeMethod.BILINEAR,
            align_corners=False)
        # 192x192x32

        self.deconv9 = deconv_block(self.conv82,
                                    weights['deconv9_weights'],
                                    weights['deconv9_biases'],
                                    scope='deconv/bn9',
                                    is_training=is_training)
        # 384x384x16
        self.sum9 = concat2d(self.deconv9, self.conv12)  # encoder skip connection
        self.conv91 = conv_block(self.sum9,
                                 weights['conv91_weights'],
                                 weights['conv91_biases'],
                                 scope='conv9/bn1',
                                 is_training=is_training)
        self.conv92 = conv_block(self.conv91,
                                 weights['conv92_weights'],
                                 weights['conv92_biases'],
                                 scope='conv9/bn2',
                                 is_training=is_training)
        # 384x384x16

        self.logits = conv_block(self.conv92,
                                 weights['output_weights'],
                                 weights['output_biases'],
                                 scope='output/bn',
                                 bn=False,
                                 is_training=is_training)
        #384x384x2

        self.pred_prob = tf.nn.softmax(
            self.logits)  # shape [batch, w, h, num_classes]
        self.pred_compact = tf.argmax(self.pred_prob,
                                      axis=-1)  # shape [batch, w, h]

        self.embeddings = concat2d(self.conv82_resize, self.conv92)

        return self.pred_prob, self.pred_compact, self.embeddings
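forward_unet() expects a `weights` dict keyed by names such as 'conv11_weights'. Below is a hedged sketch of how two of its entries might be created; the 16-channel shape follows the 192x192x16 comment above, and the single input channel is an assumption:

    # Illustrative construction of two `weights` entries; shapes are inferred
    # from the size comments, not taken from the original source.
    weights = {
        'conv11_weights': tf.get_variable(
            'conv11_weights', [3, 3, 1, 16],  # input assumed single-channel
            initializer=tf.glorot_uniform_initializer()),
        'conv11_biases': tf.get_variable(
            'conv11_biases', [16], initializer=tf.zeros_initializer()),
    }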
Example #9
def create_conv_net(x,
                    keep_prob,
                    channels,
                    layers,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    training=True):
    """
    Creates a convolutional U-Net graph from the given parameters.
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param training: flag passed to batch normalization (training vs. inference)
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    # Reshape the input into an NHWC image tensor
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")

            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(
                tf.layers.batch_normalization(conv1, training=training))
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(
                tf.layers.batch_normalization(conv2, training=training))

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))

            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(
                tf.layers.batch_normalization(conv1, training=training))
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(
                tf.layers.batch_normalization(conv2, training=training))
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
            size -= 4

    # Output Map
    with tf.name_scope("output_map"):
        # Only the defect-class map enters the loss function, so the output
        # has a single channel.
        weight = weight_variable([1, 1, features_root, 1], stddev)
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.squeeze(tf.nn.sigmoid(conv), axis=-1)
        up_h_convs["out"] = output_map

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
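An illustrative call (placeholder shapes are assumptions; the net produces a single-channel sigmoid output map):

    # Illustrative usage of create_conv_net().
    x = tf.placeholder(tf.float32, [None, None, None, 1])
    keep_prob = tf.placeholder(tf.float32)
    output_map, variables, offset = create_conv_net(
        x, keep_prob, channels=1, layers=3, features_root=16)
    # `offset` is the size bookkeeping returned as int(in_size - size): a
    # measure of how much smaller the output map is than the input.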
Example #10
    def model(self, image_batch=None, label_batch=None):
        """创建网络graph"""
        # 1st Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv1 = layer.conv_block(image_batch,
                                      11,
                                      11,
                                      64,
                                      2,
                                      2,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block1')
        self.pool1 = layer.max_pool(self.conv1,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool1')

        # 2nd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv2 = layer.conv_block(self.pool1,
                                      7,
                                      7,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block2')
        self.pool2 = layer.max_pool(self.conv2,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool2')

        # 3rd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv3 = layer.conv_block(self.pool2,
                                      5,
                                      5,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block3')
        self.pool3 = layer.max_pool(self.conv3,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool3')

        # 4th Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv4 = layer.conv_block(self.pool3,
                                      3,
                                      3,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block4')
        self.pool4 = layer.max_pool(self.conv4,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool4')

        # 5th Layer: Fully connected-BatchNorm-ReLU -> Dropout
        self.fc1 = layer.fc(self.pool4,
                            256,
                            initializer=self.initializer,
                            relu=True,
                            is_training=self.is_training,
                            norm=self.norm,
                            name='fc1')
        self.dropout1 = layer.dropout(self.fc1,
                                      self.keep_prob,
                                      name='dropout1')

        # 6th Layer: fully connected layer
        self.fc2 = layer.fc(self.dropout1,
                            10,
                            initializer=self.initializer,
                            relu=False,
                            is_training=self.is_training,
                            norm=None,
                            name='fc2')

        if label_batch is not None:
            loss = self.netloss(self.fc2, label_batch)
            correct_prediction = tf.equal(tf.argmax(self.fc2, 1), label_batch)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return loss, accuracy
            #return loss,accuracy,self.fc2,tf.argmax(self.fc2,1),label_batch
        else:
            # At prediction time label_batch is None; return only the
            # predicted class indices.
            #return self.fc2
            return tf.argmax(self.fc2, 1)
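The two return paths are then used like this (illustrative; `net` and the batch tensors are assumed to come from the surrounding input pipeline):

    # Training graph: labels provided -> (loss, accuracy).
    loss, accuracy = net.model(image_batch, label_batch)
    # Inference graph: no labels -> predicted class indices.
    pred = net.model(image_batch)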