Example #1
def decoder(x):
    """Create decoder given placeholder input tensor."""
    # Decoding layer 1
    with tf.name_scope('decoder1'):
        with tf.name_scope('weights'):
            weights1 = weight_variable([64, 512], stddev=0.1)
            variable_summaries(weights1)
        with tf.name_scope('biases'):
            biases1 = bias_variable([512], init_val=0.1)
        layer1 = fc_layer(x, weights1, biases1)

    # Decoding layer 2
    with tf.name_scope('decoder2'):
        with tf.name_scope('weights'):
            weights2 = weight_variable([512, 2048], stddev=0.01)
            variable_summaries(weights2)
        with tf.name_scope('biases'):
            biases2 = bias_variable([2048], init_val=0.01)
        layer2 = fc_layer(layer1, weights2, biases2)

    # Decoding layer 3
    with tf.name_scope('decoder3'):
        with tf.name_scope('weights'):
            weights3 = weight_variable(
                [2048, INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS], stddev=0.01)
            variable_summaries(weights3)
        with tf.name_scope('biases'):
            biases3 = bias_variable(
                [INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS], init_val=0.01)
        layer3 = fc_layer(layer2, weights3, biases3)
    return layer3
Example #2
def encoder(x):
    """Create encoder given placeholder input tensor."""
    # Encoding layer 1
    with tf.name_scope('encoder1'):
        with tf.name_scope('weights'):
            weights1 = weight_variable(
                [INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS, 2048], stddev=0.01)
            variable_summaries(weights1)
        with tf.name_scope('biases'):
            biases1 = bias_variable([2048], init_val=0.01)
        layer1 = fc_layer(x, weights1, biases1)

    # Encoding layer 2
    with tf.name_scope('encoder2'):
        with tf.name_scope('weights'):
            weights2 = weight_variable([2048, 512], stddev=0.01)
            variable_summaries(weights2)
        with tf.name_scope('biases'):
            biases2 = bias_variable([512], init_val=0.01)
        layer2 = fc_layer(layer1, weights2, biases2)

    # Mu encoder layer
    with tf.name_scope('mu_encoder'):
        with tf.name_scope('weights'):
            weights_mu = weight_variable([512, 64], stddev=0.1)
            variable_summaries(weights_mu)
        with tf.name_scope('biases'):
            biases_mu = bias_variable([64], init_val=0.1)
        mu_encoder = fc_layer(layer2, weights_mu, biases_mu)

    # Log(sigma) encoder layer
    with tf.name_scope('log_sigma_encoder'):
        with tf.name_scope('weights'):
            weights_log_sigma = weight_variable([512, 64], stddev=0.1)
            variable_summaries(weights_log_sigma)
        with tf.name_scope('biases'):
            biases_log_sigma = bias_variable([64], init_val=0.1)
        log_sigma_encoder = fc_layer(layer2, weights_log_sigma,
                                     biases_log_sigma)

    # Sample epsilon, a truncated normal tensor
    epsilon = tf.truncated_normal(tf.shape(log_sigma_encoder))

    # Sample latent variables
    with tf.name_scope('latent_layer'):
        std_encoder = tf.exp(log_sigma_encoder)
        z = tf.add(mu_encoder, tf.multiply(std_encoder, epsilon))
        variable_summaries(z)
    return mu_encoder, log_sigma_encoder, epsilon, z
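
The two snippets above are the halves of a variational autoencoder: `encoder` returns the mean and log standard deviation of q(z|x) plus a reparameterized sample z, and `decoder` maps z back to the flattened image. Below is a minimal sketch of how they could be wired together with the usual VAE loss; the input placeholder `x`, the squared-error reconstruction term and the optimizer are illustrative assumptions, not taken from the original repository, while `INPUT_WIDTH`, `INPUT_HEIGHT` and `NUM_CHANNELS` are the module constants the snippets already rely on.

import tensorflow as tf

# Hypothetical wiring of the encoder/decoder above (sketch, not the original code)
x = tf.placeholder(tf.float32,
                   shape=[None, INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS])
mu, log_sigma, epsilon, z = encoder(x)
reconstruction = decoder(z)

with tf.name_scope('loss'):
    # Squared-error reconstruction term (one common choice of likelihood)
    reconstruction_loss = tf.reduce_sum(tf.square(reconstruction - x), axis=1)
    # KL(N(mu, sigma^2) || N(0, 1)) for a diagonal Gaussian; log_sigma is
    # log(sigma), matching std_encoder = tf.exp(log_sigma_encoder) above
    kl_divergence = -0.5 * tf.reduce_sum(
        1.0 + 2.0 * log_sigma - tf.square(mu) - tf.exp(2.0 * log_sigma), axis=1)
    loss = tf.reduce_mean(reconstruction_loss + kl_divergence)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)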
Example #3
def conv_bn_relu_drop(x,
                      kernal,
                      phase,
                      drop,
                      image_z=None,
                      height=None,
                      width=None,
                      scope=None):
    with tf.name_scope(scope):
        W = weight_init(shape=kernal,
                        n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                        n_outputs=kernal[-1],
                        activefunction='relu',
                        variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv3d(x, W) + B
        conv = normalizationlayer(conv,
                                  is_train=phase,
                                  height=height,
                                  width=width,
                                  image_z=image_z,
                                  norm_type='group',
                                  scope=scope)
        conv = tf.nn.dropout(tf.nn.relu(conv), drop)
        return conv
Example #4
def dsc(in_channels=1, out_channels=2, input_side_length=256, depth=512, filter_depth=256, filter_width=3, sparse_labels=True, batch_size=None):

    with tf.name_scope('inputs'):

        shape = [batch_size, input_side_length, input_side_length, in_channels]
        inputs = tf.placeholder(tf.float32, shape=shape, name='inputs')

        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32, shape=(batch_size, input_side_length, input_side_length), name='labels')
        else:
            shape = [batch_size, input_side_length, input_side_length, out_channels]
            ground_truth = tf.placeholder(tf.float32, shape=shape, name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

    network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    layer_out = layers.densely_stacked_column(network_input, depth, filter_depth, filter_width, output_depth=filter_depth, data_format="NCHW")

    weights = layers.weight_variable([filter_width, filter_width, filter_depth, out_channels], stddev=np.sqrt(2. / (filter_width * filter_width * filter_depth)))
    logits = tf.nn.conv2d(layer_out, weights, strides=[1, 1, 1, 1], padding="SAME", data_format="NCHW", name="conv")
    logits += layers.bias_variable([out_channels, 1, 1])

    logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob
Example #5
    def CNN_baseline(self, x, keep_prob, layers=LAYERS, filters_nb=FILTERS_NB, filters_widths=FILTERS_WIDTH):
        """
        Creates a baseline convolutional network (SRCNN-style) for the given parametrization.

        :param x: input tensor, shape [?,nx,ny,channels_in]
        :param keep_prob: probability for dropout
        :param layers: number of layers of the network
        :param filters_nb: number of filters for each layer
        :param filters_widths: size of the filter for each layer
        """

        logging.info("Layers {layers}, number of filters {filters_nb}, filter size {filters_widths}".format(
            layers=layers, filters_nb=filters_nb, filters_widths=filters_widths))

        # Reshape the input image
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, INPUT_SIZE]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

        weights = []
        biases = []
        convs = []
        h_convs = OrderedDict()

        # Patch extraction and non-linear mapping
        for layer in range(0, layers):
            stddev = np.sqrt(2 / (filters_nb[layer + 1] * filters_widths[layer]**2))
            w = weight_variable([filters_widths[layer], filters_widths[layer], filters_nb[layer], filters_nb[layer + 1]], stddev)
            b = bias_variable([filters_nb[layer + 1]])
            conv_ = conv2d(in_node, w, keep_prob)

            # Original SRCNN formulation: no activation on the last layer
            if layer < (layers - 1):
                h_convs[layer] = tf.nn.relu(conv_ + b)
                in_node = h_convs[layer]
            else:
                output_map = conv_ + b
            weights.append(w)
            biases.append(b)
            convs.append(conv_)

        h_convs["out"] = output_map

        variables = []
        for w in weights:
            variables.append(w)

        for b in biases:
            variables.append(b)

        return output_map, variables
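
Since the last layer is left linear, this baseline is trained like SRCNN by regressing the high-resolution target directly. A hedged usage sketch follows; the instance name `model`, the target placeholder `y` and the mean-squared-error loss are illustrative assumptions, while `INPUT_SIZE` and `FILTERS_NB` are the module constants the method already uses.

import tensorflow as tf

# Sketch only: assumes `model` is an instance of the class defining CNN_baseline above
x = tf.placeholder(tf.float32, shape=[None, None, None, INPUT_SIZE])
y = tf.placeholder(tf.float32, shape=[None, None, None, FILTERS_NB[-1]])
keep_prob = tf.placeholder(tf.float32, shape=[])

output_map, variables = model.CNN_baseline(x, keep_prob)
loss = tf.reduce_mean(tf.square(output_map - y))  # pixel-wise MSE against the HR target
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)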
Example #6
def conv_sigmod(x, kernal, scope=None):
    with tf.name_scope(scope):
        W = weight_init(shape=kernal,
                        n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3],
                        n_outputs=kernal[-1],
                        activefunction='sigomd',
                        variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        conv = conv3d(x, W) + B
        conv = tf.nn.sigmoid(conv)
        return conv
Example #7
def deconv_relu(x, kernal, samefeture=False, scope=None):
    with tf.name_scope(scope):
        W = weight_init(shape=kernal,
                        n_inputs=kernal[0] * kernal[1] * kernal[2] *
                        kernal[-1],
                        n_outputs=kernal[-2],
                        activefunction='relu',
                        variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        conv = deconv3d(x, W, samefeture, True) + B
        conv = tf.nn.relu(conv)
        return conv
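
Examples #3, #6 and #7 are small 3D building blocks from the same helper set (judging from how n_inputs/n_outputs are computed, the weight layout is [depth, height, width, in, out] for convolutions and [..., out, in] for the transposed convolution). The sketch below shows one way they might be chained into a tiny encoder-decoder segment; the channel counts, kernel lists and scope names are illustrative assumptions, not taken from the original repository.

# Sketch only: one down block, one up block, one sigmoid output head
def mini_segment(x, phase, drop, base_channels=16, scope='mini'):
    down = conv_bn_relu_drop(x, [3, 3, 3, 1, base_channels], phase, drop,
                             scope=scope + '_down')
    up = deconv_relu(down, [3, 3, 3, base_channels, base_channels],
                     samefeture=True, scope=scope + '_up')
    out = conv_sigmod(up, [1, 1, 1, base_channels, 1], scope=scope + '_out')
    return out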
Example #8
def uNet(input_shape=None,
         n_layers=3,
         stride=1,
         pool=2,
         learn_rate=1.0e-4,
         epochs=10e4,
         train_size=3,
         channels=3,
         classes=2,
         filter=5):

    features = [channels, 32, 64, 128, 256, 512, 800, classes]
    x = tf.placeholder(tf.float32, [None, None, None, features[0]])
    y_ = tf.placeholder(tf.int32, [None, None, None, 1])

    # Create weights and biases for each layer
    W_conv1 = layers.weight_variable(
        [filter, filter, features[0], features[1]])
    b_conv1 = layers.bias_variable([features[1]])

    W_conv2 = layers.weight_variable(
        [filter, filter, features[1], features[2]])
    b_conv2 = layers.bias_variable([features[2]])

    W_conv3 = layers.weight_variable(
        [filter, filter, features[2], features[3]])
    b_conv3 = layers.bias_variable([features[3]])

    W_conv4 = layers.weight_variable(
        [filter, filter, features[3], features[4]])
    b_conv4 = layers.bias_variable([features[4]])

    W_conv5 = layers.weight_variable(
        [filter, filter, features[4], features[5]])
    b_conv5 = layers.bias_variable([features[5]])
Example #9
# input training data

train_images = cdl.GetTrainDataByLabel("data")
train_labels = cdl.GetTrainDataByLabel("labels")

test_images = cdl.GetTestDataByLabel("data")
test_labels = cdl.GetTestDataByLabel("labels")

input_images = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))  # bfloat16
input_labels = tf.placeholder(tf.float32, shape=(None, 10))  # one-hot encoding

# ResNet uses add, DenseNet uses concatenate

# section 1
W_conv1 = layers.weight_variable([7, 7, 3, 16])
b_conv1 = layers.bias_variable([16])
h_conv1 = tf.nn.relu(conv2d(input_images, W_conv1) + b_conv1)
h_pool1 = layers.max_pool(h_conv1, 3, 2)

# Dense Block 1

# Transition Layer 1

# Dense Block 2

# Transition Layer 2

# Dense Block 3

# Transition Layer 3
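
The comment above notes that ResNet adds feature maps while DenseNet concatenates them, but the dense blocks themselves are only stubbed out. Below is a hedged sketch of one dense block built from the same helpers used in this example (`layers.weight_variable`, `layers.bias_variable`, `conv2d`); the layer count and growth rate are illustrative assumptions.

import tensorflow as tf

def dense_block(x, num_layers=4, growth_rate=12):
    # Sketch only: each 3x3 convolution sees the concatenation of all previous
    # feature maps (DenseNet), instead of an element-wise add (ResNet)
    features = x
    for _ in range(num_layers):
        in_channels = features.get_shape().as_list()[-1]
        W = layers.weight_variable([3, 3, in_channels, growth_rate])
        b = layers.bias_variable([growth_rate])
        new_features = tf.nn.relu(conv2d(features, W) + b)
        features = tf.concat([features, new_features], axis=3)
    return features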
Example #10
def unet(in_channels=1,
         out_channels=2,
         start_filters=64,
         side_length=572,
         depth=4,
         convolutions=2,
         filter_size=3,
         sparse_labels=True,
         batch_size=1):
    """
    Creates the graph for the standard U-Net and sets up the appropriate input and output placeholder.

    Parameters
    ----------
    in_channels: int
        The depth of the input.
    out_channels: int
        The number of classes, i.e. the depth of the output.
    start_filters : int
        The number of filters in the first convolution.
    side_length: int
        The side length of the square input.
    depth: int
        The depth of the U-part of the network. This is equal to the number of max-pooling layers.
    convolutions: int
        The number of convolutions in between max-pooling layers on the down-path and in between up-convolutions on the up-path.
    filter_size: int
        The width and height of the filter. The receptive field.
    sparse_labels: bool
        If true, the labels are integers, one integer per pixel, denoting the class that that pixel belongs to. If false, labels are one-hot encoded.
    batch_size: int
        The training batch size.

    Returns
    -------
    inputs : TF tensor
        The network input.
    logits: TF tensor
        The network output before SoftMax.
    ground_truth: TF tensor
        The desired output from the ground truth.
    keep_prob: TF float
        The TF variable holding the keep probability for drop out layers.  
    """

    pool_size = 2
    padding = "SAME"

    # Define inputs and helper functions #
    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32,
                                shape=(batch_size, side_length, side_length,
                                       in_channels),
                                name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32,
                                          shape=(batch_size, side_length,
                                                 side_length),
                                          name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32,
                                          shape=(batch_size, side_length,
                                                 side_length, out_channels),
                                          name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # [conv -> conv -> max pool -> drop out] + parameter updates
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input,
                                         filter_size,
                                         channel_multiplier=2,
                                         convolutions=convolutions,
                                         padding=padding,
                                         data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out

    # parameter updates + [upconv and concat -> drop out -> conv -> conv]
    def step_up(name, bottom_input, side_input):

        with tf.variable_scope(name):
            concat_out = layers.upconv_concat_block(bottom_input,
                                                    side_input,
                                                    data_format="NCHW")
            drop_out = layers.dropout(concat_out, keep_prob)
            result = layers.conv_block(drop_out,
                                       filter_size,
                                       channel_multiplier=0.5,
                                       convolutions=convolutions,
                                       padding=padding,
                                       data_format="NCHW")

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        # Set initial parameters
        outputs = []

        # Build contracting path
        with tf.variable_scope("step_0"):
            conv_out = layers.conv_block(network_input,
                                         filter_size,
                                         out_filters=start_filters,
                                         convolutions=convolutions,
                                         padding=padding,
                                         data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            current_tensor = layers.dropout(pool_out, keep_prob)
            outputs.append(conv_out)

        for i in xrange(1, depth):
            current_tensor, conv_out = step_down("step_" + str(i),
                                                 current_tensor)
            outputs.append(conv_out)

    # Bottom [conv -> conv]
    with tf.variable_scope("step_" + str(depth)):
        current_tensor = layers.conv_block(current_tensor,
                                           filter_size,
                                           channel_multiplier=2,
                                           convolutions=convolutions,
                                           padding=padding,
                                           data_format="NCHW")

    with tf.variable_scope("expanding"):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in xrange(depth):
            current_tensor = step_up("step_" + str(depth + i + 1),
                                     current_tensor, outputs[i])

    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope("classification"):

        weight = layers.weight_variable([1, 1, in_filters, out_channels],
                                        stddev,
                                        name="weights")
        bias = layers.bias_variable([out_channels, 1, 1], name="biases")

        conv = tf.nn.conv2d(current_tensor,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding="VALID",
                            name="conv",
                            data_format="NCHW")
        logits = conv + bias

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob
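
As the docstring explains, with sparse_labels=True the ground-truth placeholder holds one class index per pixel, so the returned logits can be fed straight into a sparse softmax cross-entropy. Below is a minimal training sketch under that assumption; the side length (kept divisible by 2^depth), the dummy data, the optimizer and the keep probability are illustrative choices, not taken from the original repository.

import numpy as np
import tensorflow as tf

inputs, logits, ground_truth, keep_prob = unet(in_channels=1, out_channels=2,
                                               side_length=256, depth=4,
                                               sparse_labels=True, batch_size=1)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ground_truth,
                                                   logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    images = np.zeros((1, 256, 256, 1), dtype=np.float32)  # dummy batch
    labels = np.zeros((1, 256, 256), dtype=np.int32)        # one class id per pixel
    _, loss_value = sess.run([train_op, loss],
                             feed_dict={inputs: images,
                                        ground_truth: labels,
                                        keep_prob: 0.75})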
Example #11
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
        Creates a new convolutional unet for the given parametrization.
        :param x: input tensor, shape [?,nx,ny,channels]
        :param keep_prob: dropout probability tensor
        :param channels: number of channels in the input image
        :param n_class: number of output labels
        :param layers: number of layers in the net
        :param features_root: number of features in the first layer
        :param filter_size: size of the convolution filter
        :param pool_size: size of the max pooling operation
        :param summaries: Flag if summaries should be created
        """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 128
    size = in_size
    # Down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root  # output features number
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Set weights and biases for the two convolutions
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.leaky_relu(conv2)
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            # Do pooling and calculate image processing size
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]
    # Up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Up-convolution and skip connection; weight shape: [kernel_x, kernel_y, out_features, in_features]
            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.leaky_relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat
            # Set weights and biases for the two convolutions
            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")
            # Build the two-convolution block
            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.leaky_relu(conv2)
            up_h_convs[layer] = in_node
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
    # Output map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.leaky_relu(conv)
        up_h_convs["out"] = output_map

    # blur map
    with tf.name_scope("output_blur"):
        weight = weight_variable([1, 1, features_root, 1], stddev)
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_blur = tf.nn.leaky_relu(conv)
        up_h_convs["blur"] = output_blur

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)
    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, output_blur, variables, int(in_size - size)
Example #12
def inference(x):
    #conv layer 1
    with tf.name_scope('conv1') as scope:
        w = layers.xavier_weights_variable([3, 3, 3, 128])
        b = layers.bias_variable([128])
        conv1 = tf.nn.relu(layers.conv2d(x, w) + b, name="activations")

    #conv layer 2
    with tf.name_scope('conv2') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv2 = tf.nn.relu(layers.conv2d(conv1, w) + b, name="activations")

    #maxpool1, images now 32x32
    with tf.name_scope('pool1') as scope:
        pool1 = layers.max_pool_2x2(conv2)

    #conv layer 3
    with tf.name_scope('conv3') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv3 = tf.nn.relu(layers.conv2d(pool1, w) + b, name="activations")

    #conv layer 4
    with tf.name_scope('conv4') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv4 = tf.nn.relu(layers.conv2d(conv3, w) + b, name="activations")

    #maxpool2, images now 16x16
    with tf.name_scope('pool2') as scope:
        pool2 = layers.max_pool_2x2(conv4)

    #conv layer 5
    with tf.name_scope('conv5') as scope:
        w = layers.xavier_weights_variable([3, 3, 128, 128])
        b = layers.bias_variable([128])
        conv5 = tf.nn.relu(layers.conv2d(pool2, w) + b, name="activations")

    #maxpool3, images now 8x8
    with tf.name_scope('pool3') as scope:
        pool3 = layers.max_pool_2x2(conv5)

    #fully connected layer 1
    with tf.name_scope('fully_connected1') as scope:
        w = layers.xavier_weights_variable([8 * 8 * 128, 400])
        b = layers.bias_variable([400])
        pool3_flat = tf.reshape(pool3, [-1, 8 * 8 * 128])
        fully_connected1 = tf.nn.relu(tf.matmul(pool3_flat, w) + b,
                                      name="activations")

    #fully connected layer 2
    with tf.name_scope('fully_connected2') as scope:
        w = layers.xavier_weights_variable([400, 400])
        b = layers.bias_variable([400])
        fully_connected2 = tf.nn.relu(tf.matmul(fully_connected1, w) + b,
                                      name="activations")

    #fully connected layer 3
    with tf.name_scope('fully_connected3') as scope:
        w = layers.xavier_weights_variable([400, 200])
        b = layers.bias_variable([200])
        y_pred = tf.matmul(fully_connected2, w) + b
    return y_pred
Example #13
def create_conv_net(x,
                    keep_prob,
                    channels_in,
                    channels_out,
                    n_class,
                    layers=2,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels_in]
    :param keep_prob: dropout probability tensor
    :param channels_in: number of channels in the input image
    :param channels_out: number of channels in the output image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels_in]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels_in, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:  # no pooling after the last down layer (the bottom of the U)
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    # dw_h_convs[layers - 1] is the last down layer, i.e. the bottom of the U
    # (indexing starts at 0, hence layers - 1 rather than layers)
    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):  # the up path starts one level above the bottom of the U
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])  # weights and bias for upsampling from one level to the next
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)  # in_node is the output of the level below

        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)  # skip connection from the matching down layer
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class * channels_out],
                             stddev)
    bias = bias_variable([n_class * channels_out])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
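
The `size` bookkeeping in this variant (size -= 4 per pair of valid 3x3 convolutions, size /= 2 per max pooling, size *= 2 per up-convolution) determines the returned offset int(in_size - size), i.e. how much smaller the valid-padding output is than the input. The small sketch below reproduces the same arithmetic on its own; it is an illustrative helper, not part of the original snippet.

def unet_size_offset(in_size=1000, layers=2, filter_size=3):
    # Each block applies two valid convolutions, losing filter_size - 1 pixels each
    loss_per_block = 2 * (filter_size - 1)
    size = in_size
    for layer in range(layers):            # down path
        size -= loss_per_block
        if layer < layers - 1:
            size /= 2                      # max pooling halves the size
    for _ in range(layers - 1):            # up path
        size *= 2                          # up-convolution doubles the size
        size -= loss_per_block
    return int(in_size - size)

# With the defaults: ((1000 - 4) / 2 - 4) * 2 - 4 = 984, so the offset is 1000 - 984 = 16.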
Example #14
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """ Creates a new convolutional unet for the given parametrization.
    ### Params:
        * x - Tensor: shape [batch_size, height, width, channels]
        * keep_prob - float: dropout probability
        * channels - integer: number of channels in the input image
        * n_class - integer: number of output labels
        * layers - integer: number of layers in the net
        * features_root - integer: number of features in the first layer
        * filter_size - integer: size of the convolution filter
        * pool_size - integer: size of the max pooling operation
        * summaries - bool: Flag if summaries should be created
    """

    logger.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_convs = OrderedDict()
    up_convs = OrderedDict()

    # Record the size difference
    in_size = 1000
    size = in_size

    # Encode
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:
            pools[layer] = max_pool(dw_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_convs[layers - 1]

    # Decode
    for layer in range(layers - 2, -1, -1):
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class], stddev)
    bias = bias_variable([n_class])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_convs["out"] = output_map

    # Summary the results of convolution and pooling
    if summaries:
        with tf.name_scope("summary_conv"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('layer_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('layer_%02d_02' % i, get_image_summary(c2))

        with tf.name_scope("summary_max_pooling"):
            for k in pools.keys():
                tf.summary.image('pool_%02d' % k, get_image_summary(pools[k]))

        with tf.name_scope("summary_deconv"):
            for k in deconv.keys():
                tf.summary.image('deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

        with tf.name_scope("down_convolution"):
            for k in dw_convs.keys():
                tf.summary.histogram("layer_%02d" % k + '/activations',
                                     dw_convs[k])

        with tf.name_scope("up_convolution"):
            for k in up_convs.keys():
                tf.summary.histogram("layer_%s" % k + '/activations',
                                     up_convs[k])

    # Record all the variables which can be used in L2 regularization
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
Example #15
def create_UNet_edge_xz_yz(x,
                           keep_prob,
                           channels,
                           n_class,
                           layers=5,
                           features_root=32,
                           summaries=True,
                           training=True):
    # Inception-conv UNet with deep supervision for coronal and transverse plane
    logging.info("Layers {layers}, features {features}".format(
        layers=layers, features=features_root))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    fat_inputs = OrderedDict()
    fat_pools = OrderedDict()
    fat_dw_h_convs = OrderedDict()
    deconv = OrderedDict()
    up_h_convs = OrderedDict()

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            if layer == 0:
                conv = inception_conv(in_node, 4, features, keep_prob,
                                      training)
            else:
                conv = inception_conv(in_node, features // 2, features,
                                      keep_prob, training)

            conv = cSE_layer(conv,
                             features,
                             ratio=4,
                             name="down_conv_{}".format(layer))
            fat_dw_h_convs[layer] = conv

            if layer < layers - 1:
                if layer == 0 or layer == 2:
                    fat_pools[layer] = max_pool_xz(conv, 1, 2)
                elif layer == 1 or layer == 3:
                    fat_pools[layer] = max_pool_xz(conv, 2, 2)
                in_node = fat_pools[layer]

    in_node = fat_dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            if layer == 3 or layer == 1:
                h_deconv = deconv2d_xz(in_node, features, features // 2, 2, 2,
                                       training)
            else:
                h_deconv = deconv2d_xz(in_node, features, features // 2, 1, 2,
                                       training)
            h_deconv_concat = tf.concat([h_deconv, fat_dw_h_convs[layer]], 3)

            deconv[layer] = h_deconv_concat
            in_node = inception_conv(h_deconv_concat, features, features // 2,
                                     keep_prob, training)
            in_node = cSE_layer(in_node,
                                features // 2,
                                ratio=4,
                                name="up_conv_{}".format(layer))
            up_h_convs[layer] = in_node

    stddev = np.sqrt(2 / (3**2 * features_root))
    w = weight_variable([3, 3, features_root, 2], stddev, name="w")
    b = bias_variable([2], name="b")
    up_h_convs["out"] = tf.nn.bias_add(
        tf.nn.conv2d(up_h_convs[0], w, strides=[1, 1, 1, 1], padding="SAME"),
        b)

    # RCF
    for layer in range(2, 0, -1):
        with tf.name_scope("output_{}".format(str(layer))):
            in_node = up_h_convs[layer]
            conv = conv2d_2(in_node, features_root * 2**layer, 2, keep_prob)
            deconv = deconv2d_xz_2(conv,
                                   in_dim=2,
                                   out_dim=2,
                                   larger1=1,
                                   larger2=2**layer,
                                   training=training)
            up_h_convs["out_{}".format(layer)] = deconv

    in_node = tf.concat(
        [up_h_convs["out"], up_h_convs["out_1"], up_h_convs["out_2"]], 3)
    w_out = weight_variable([1, 1, 6, 2], stddev, name="w_out")
    b_out = bias_variable([2], name="b_out")
    output_map = tf.nn.bias_add(
        tf.nn.conv2d(in_node, w_out, strides=[1, 1, 1, 1], padding="SAME"),
        b_out)

    if summaries:
        with tf.name_scope("summaries"):
            for k in fat_pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(fat_pools[k]))

            for k in fat_dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations',
                    fat_dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return [
        output_map, up_h_convs["out"], up_h_convs["out_1"], up_h_convs["out_2"]
    ], variables
Example #16
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=5,
                    features_root=32,
                    summaries=True,
                    training=True):
    # Conventional U-Net
    logging.info("Layers {layers}, features {features}".format(
        layers=layers, features=features_root))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    # rmvd train
    in_node, excitation = rmvd_layer(in_node, 8, 2, name="rmvd_training")

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            if layer == 0:
                conv1 = conv2d(in_node, channels, features, keep_prob,
                               training)
            else:
                conv1 = conv2d(in_node, features // 2, features, keep_prob,
                               training)

            conv2 = conv2d(conv1, features, features, keep_prob, training)
            dw_h_convs[layer] = conv2

            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], 2)
                in_node = pools[layer]

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            h_deconv = deconv2d(in_node, features, features // 2, training)
            h_deconv_concat = tf.concat([dw_h_convs[layer], h_deconv], 3)
            deconv[layer] = h_deconv_concat

            conv1 = conv2d(h_deconv_concat, features, features // 2, keep_prob,
                           training)
            conv2 = conv2d(conv1, features // 2, features // 2, keep_prob,
                           training)
            in_node = conv2
            up_h_convs[layer] = in_node

    stddev = np.sqrt(2 / (3**2 * features_root))
    w = weight_variable([3, 3, features_root, 2], stddev, name="w")
    b = bias_variable([2], name="b")
    output_map = tf.nn.bias_add(
        tf.nn.conv2d(up_h_convs[0], w, strides=[1, 1, 1, 1], padding="SAME"),
        b)
    up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return [output_map], variables
Example #17
def IPN(x,
        PLM_NUM=5,
        filter_size=[3, 3, 3],
        LAYER_NUM=3,
        NUM_OF_CLASS=2,
        pooling_size=[]):
    """
    :param x: input tensor, shape [?,nx,ny,nz,channels]
    :param filter_size: size of the convolution filter
    :param PLM_NUM: number of PLM
    :param LAYER_NUM: number of conv layers in each PLM
    :return:
    """
    # Initialize variables
    # Use automatically computed pooling sizes unless they are supplied explicitly
    if not pooling_size:
        pooling_size = utils.cal_downsampling_size_combine(x.shape[1], PLM_NUM)
    else:
        PLM_NUM = len(pooling_size)

    W = [[0] * LAYER_NUM for i in range(PLM_NUM)]
    b = [[0] * LAYER_NUM for i in range(PLM_NUM)]
    conv = [[0] * LAYER_NUM for i in range(PLM_NUM)]
    pool = [[0] * LAYER_NUM for i in range(PLM_NUM)]
    variables = []

    #features = utils.cal_channel_num(PLM_NUM)
    features = np.ones(PLM_NUM, dtype='int32') * 64
    ##################### print model parameters #############
    print('')
    print('-----------------  model paras ------------------')
    resize = 1
    print('PLM DS SIZE: ', end='')
    for index in pooling_size:
        resize *= index
    print('{}->{} = '.format(x.shape[1], resize), end='')
    for i, s in enumerate(pooling_size):
        if i == 0:
            print(str(s), end='')
        else:
            print('x' + str(s), end='')

    print('')
    print('conv channel nums : ', end='')
    for f in features:
        print(f, ',', end='')
    print('')
    print('---------------------  end ----------------------')
    print('')
    ######################################################

    features_count = -1
    stddev = 0.02

    #Build Projection Learning Module
    for PLM in range(PLM_NUM):
        features_count += 1
        if PLM == 0:
            input = x
        else:
            input = pool[PLM - 1]
        for LAYER in range(LAYER_NUM):
            b[PLM][LAYER] = bias_variable([features[features_count]],
                                          name="b{}".format(PLM + 1))
            in_channels = input.get_shape().as_list()[-1]
            W[PLM][LAYER] = weight_variable(
                filter_size + [in_channels, features[features_count]],
                stddev,
                name="w{}_{}".format(PLM + 1, LAYER + 1))
            variables.append(W[PLM][LAYER])
            variables.append(b[PLM][LAYER])
            conv[PLM][LAYER] = tf.nn.relu(
                conv3d(input, W[PLM][LAYER], b[PLM][LAYER]))
            if LAYER == LAYER_NUM - 1:
                pool[PLM] = Unidirectional_pool(
                    conv[PLM][LAYER], pooling_size[PLM])  #Unidirectional_pool
            else:
                input = conv[PLM][LAYER]

    #Output MAP
    Wop = weight_variable(filter_size +
                          [features[features_count], NUM_OF_CLASS],
                          stddev,
                          name="w_output")
    bop = bias_variable([NUM_OF_CLASS], name="b_output")
    output = tf.nn.relu(
        tf.nn.bias_add(
            tf.nn.conv3d(pool[PLM_NUM - 1],
                         Wop,
                         strides=[1, 1, 1, 1, 1],
                         padding="SAME"), bop))

    sf = tf.nn.softmax(output)
    pred = tf.argmax(sf, axis=-1, name="prediction")
    return output, pred, variables, sf
Example #18
def parameter_efficient(in_channels=1,
                        out_channels=2,
                        start_filters=64,
                        input_side_length=256,
                        depth=4,
                        res_blocks=2,
                        filter_size=3,
                        sparse_labels=True,
                        batch_size=1,
                        activation="cReLU",
                        batch_norm=True):
    """
    Creates the graph for the parameter efficient variant of the U-Net and sets up the appropriate input and output placeholder.

    Parameters
    ----------
    in_channels: int
        The depth of the input.
    out_channels: int
        The number of classes, i.e. the depth of the output.
    start_filters : int
        The number of filters in the first convolution.
    input_side_length: int
        The side length of the square input.
    depth: int
        The depth of the U-part of the network. This is equal to the number of max-pooling layers.
    res_blocks: int
        The number of residual blocks in between max-pooling layers on the down-path and in between up-convolutions on the up-path.
    filter_size: int
        The width and height of the filter. The receptive field.
    sparse_labels: bool
        If true, the labels are integers, one integer per pixel, denoting the class that that pixel belongs to. If false, labels are one-hot encoded.
    batch_size: int
        The training batch size.
    activation: string
        Either "ReLU" for the standard ReLU activation or "cReLU" for the concatenated ReLU activation function.
    batch_norm: bool
        Whether to use batch normalization or not.

    Returns
    -------
    inputs : TF tensor
        The network input.
    logits: TF tensor
        The network output before SoftMax.
    ground_truth: TF tensor
        The desired output from the ground truth.
    keep_prob: TF float
        The TF variable holding the keep probability for drop out layers.
    training_bool: TF bool
        The TF variable holding the boolean value, which switches batch normalization to training or inference mode.    
    """

    activation = str.lower(activation)
    if activation not in ["relu", "crelu"]:
        raise ValueError("activation must be \"ReLU\" or \"cReLU\".")

    pool_size = 2

    # Define inputs and helper functions #

    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32,
                                shape=(batch_size, input_side_length,
                                       input_side_length, in_channels),
                                name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32,
                                          shape=(batch_size, input_side_length,
                                                 input_side_length),
                                          name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32,
                                          shape=(batch_size, input_side_length,
                                                 input_side_length,
                                                 out_channels),
                                          name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
        training = tf.placeholder(tf.bool, shape=[], name="training")

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # [conv -> conv -> max pool -> drop out] + parameter updates
    def step_down(name,
                  input_,
                  filter_size=3,
                  res_blocks=2,
                  keep_prob=1.,
                  training=False):

        with tf.variable_scope(name):

            with tf.variable_scope("res_block_0"):
                conv_out, tiled_input = layers.res_block(
                    input_,
                    filter_size,
                    channel_multiplier=2,
                    depthwise_multiplier=2,
                    convolutions=2,
                    training=training,
                    activation=activation,
                    batch_norm=batch_norm,
                    data_format="NCHW")

            for i in xrange(1, res_blocks):
                with tf.variable_scope("res_block_" + str(i)):
                    conv_out = layers.res_block(conv_out,
                                                filter_size,
                                                channel_multiplier=1,
                                                depthwise_multiplier=2,
                                                convolutions=2,
                                                training=training,
                                                activation=activation,
                                                batch_norm=batch_norm,
                                                data_format="NCHW")

            conv_out = conv_out + tiled_input

            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")

            bottom_out = layers.dropout(pool_out, keep_prob)
            side_out = layers.dropout(conv_out, keep_prob)

        return bottom_out, side_out

    # parameter updates + [upconv and concat -> drop out -> conv -> conv]
    def step_up(name,
                bottom_input,
                side_input,
                filter_size=3,
                res_blocks=2,
                keep_prob=1.,
                training=False):

        with tf.variable_scope(name):
            added_input = layers.upconv_add_block(bottom_input,
                                                  side_input,
                                                  data_format="NCHW")

            conv_out = added_input
            for i in xrange(res_blocks):
                with tf.variable_scope("res_block_" + str(i)):
                    conv_out = layers.res_block(conv_out,
                                                filter_size,
                                                channel_multiplier=1,
                                                depthwise_multiplier=2,
                                                convolutions=2,
                                                training=training,
                                                activation=activation,
                                                batch_norm=batch_norm,
                                                data_format="NCHW")

            result = layers.dropout(conv_out, keep_prob)

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        outputs = []

        with tf.variable_scope("step_0"):

            # Conv 1
            in_filters = in_channels
            out_filters = start_filters

            stddev = np.sqrt(2. / (filter_size**2 * in_filters))
            w = layers.weight_variable(
                [filter_size, filter_size, in_filters, out_filters],
                stddev=stddev,
                name="weights")

            out_ = tf.nn.conv2d(network_input,
                                w, [1, 1, 1, 1],
                                padding="SAME",
                                data_format="NCHW")
            out_ = out_ + layers.bias_variable([out_filters, 1, 1],
                                               name='biases')

            # Batch Norm 1
            if batch_norm:
                out_ = tf.layers.batch_normalization(out_,
                                                     axis=1,
                                                     momentum=0.999,
                                                     center=True,
                                                     scale=True,
                                                     training=training,
                                                     trainable=True,
                                                     name="batch_norm",
                                                     fused=True)

            in_filters = out_filters

            # concatenated ReLU
            if activation == "crelu":
                out_ = tf.concat([out_, -out_], axis=1)
                in_filters = 2 * in_filters
            out_ = tf.nn.relu(out_)

            # Conv 2
            stddev = np.sqrt(2. / (filter_size**2 * in_filters))
            w = layers.weight_variable(
                [filter_size, filter_size, in_filters, out_filters],
                stddev=stddev,
                name="weights")

            out_ = tf.nn.conv2d(out_,
                                w, [1, 1, 1, 1],
                                padding="SAME",
                                data_format="NCHW")
            out_ = out_ + layers.bias_variable([out_filters, 1, 1],
                                               name='biases')

            # Res Block 1
            conv_out = layers.res_block(out_,
                                        filter_size,
                                        channel_multiplier=1,
                                        depthwise_multiplier=2,
                                        convolutions=2,
                                        training=training,
                                        activation=activation,
                                        batch_norm=batch_norm,
                                        data_format="NCHW")

            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")

            bottom_out = layers.dropout(pool_out, keep_prob)
            side_out = layers.dropout(conv_out, keep_prob)

            outputs.append(side_out)

        # Build contracting path
        for i in xrange(1, depth):
            bottom_out, side_out = step_down('step_' + str(i),
                                             bottom_out,
                                             filter_size=filter_size,
                                             res_blocks=res_blocks,
                                             keep_prob=keep_prob,
                                             training=training)
            outputs.append(side_out)

    # Bottom of the network: residual blocks only (no pooling)
    with tf.variable_scope('step_' + str(depth)):

        with tf.variable_scope("res_block_0"):
            conv_out, tiled_input = layers.res_block(bottom_out,
                                                     filter_size,
                                                     channel_multiplier=2,
                                                     depthwise_multiplier=2,
                                                     convolutions=2,
                                                     training=training,
                                                     activation=activation,
                                                     batch_norm=batch_norm,
                                                     data_format="NCHW")
        for i in xrange(1, res_blocks):
            with tf.variable_scope("res_block_" + str(i)):
                conv_out = layers.res_block(conv_out,
                                            filter_size,
                                            channel_multiplier=1,
                                            depthwise_multiplier=2,
                                            convolutions=2,
                                            training=training,
                                            activation=activation,
                                            batch_norm=batch_norm,
                                            data_format="NCHW")

        conv_out = conv_out + tiled_input
        current_tensor = layers.dropout(conv_out, keep_prob)

    with tf.variable_scope('expanding'):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in xrange(depth):
            current_tensor = step_up('step_' + str(depth + i + 1),
                                     current_tensor,
                                     outputs[i],
                                     filter_size=filter_size,
                                     res_blocks=res_blocks,
                                     keep_prob=keep_prob,
                                     training=training)

    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope('classification'):

        w = layers.weight_variable([1, 1, in_filters, out_channels],
                                   stddev,
                                   name='weights')
        b = layers.bias_variable([out_channels, 1, 1], name='biases')

        conv = tf.nn.conv2d(current_tensor,
                            w,
                            strides=[1, 1, 1, 1],
                            padding="SAME",
                            data_format="NCHW",
                            name='conv')
        logits = conv + b

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob, training
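The builder above only assembles the graph. Below is a minimal, hedged sketch of how its return values could be wired into a loss and training op, assuming sparse integer labels and the TF 1.x API used throughout these examples; `build_segmentation_graph` is a hypothetical stand-in for the builder above, and the optimizer and learning rate are illustrative.

import tensorflow as tf

# Hypothetical stand-in for the graph builder above.
inputs, logits, ground_truth, keep_prob, training = build_segmentation_graph()

with tf.variable_scope('loss'):
    # logits were transposed back to NHWC, so they line up with the
    # (batch, height, width) integer label map used for sparse labels.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=ground_truth, logits=logits)
    loss = tf.reduce_mean(cross_entropy)

# Batch norm keeps its moving-average updates in UPDATE_OPS; run them
# together with the optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
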
def create_network(x,
                   keep_prob,
                   padding=False,
                   resolution=3,
                   features_root=16,
                   channels=3,
                   filter_size=3,
                   deconv_size=2,
                   layers_per_transpose=2,
                   summaries=True):
    """
    :param x: input tensor, shape [?,width,height,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in input image
    :param padding: boolean, True if inputs are padded before convolution
    :param resolution: corresponds to how large the output should be relative to the input
    :param features_root: number of features in the first layer
    :param filter_size: size of convolution filter
    :param deconv_size: size of deconv strides
    :param summaries: flag if summaries should be created
    """

    logging.info(
        "Resolution x{resolution}, features {features}, filter size {filter_size}x{filter_size}"
        .format(resolution=resolution,
                features=features_root,
                filter_size=filter_size))

    with tf.name_scope("preprocessing"):
        width = x.shape[1]  #tf.shape(x)[1]
        height = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, width, height, channels]))
        in_node = tf.zeros([-1, width, height, channels],
                           tf.float32)  # dummy input

    max_size = min(1024, width * 2**(resolution - 1))

    weights = []
    biases = []
    convs = []
    outputs = []
    convsDict = OrderedDict()
    deconvsDict = OrderedDict()

    size = 128
    which_conv = 0
    which_up_conv = 0
    stddev = 0
    out_features = features_root
    in_features = channels
    while size < 1024:
        if size == width:
            in_node = x_image
        with tf.name_scope("Conv{}".format(str(size) + str(which_conv))):
            for layer in range(0, layers_per_transpose):
                if layer == 0:
                    in_features = channels
                    out_features = features_root
                else:
                    in_features = out_features
                    out_features = int(out_features / 2)  # change if necessary
                stddev = np.sqrt(2 / (filter_size**2 * out_features))
                if size < width or size >= max_size:
                    trainable = False
                else:
                    trainable = True
                w = weight_variable(
                    [filter_size, filter_size, in_features, out_features],
                    stddev,
                    name="w" + str(size) + str(layer),
                    trainable=trainable)
                b = bias_variable([out_features],
                                  name="b" + str(size) + str(layer),
                                  trainable=trainable)
                if padding:
                    in_node = tf.pad(in_node,
                                     paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
                                     mode='SYMMETRIC')
                conv = conv2d(in_node, w, b, keep_prob)
                convsDict[which_conv] = tf.nn.relu(conv)
                in_node = convsDict[which_conv]
                if trainable:
                    weights.append(w)
                    biases.append(b)
                convs.append(conv)
                which_conv += 1
        # Upscalings...
        with tf.name_scope("Up_Conv{}".format(str(size) + str(which_up_conv))):
            stddev = np.sqrt(2 / (filter_size**2 * out_features))
            if layers_per_transpose == 0:
                in_features = channels
            else:
                in_features = out_features
            wd = weight_variable(
                [deconv_size, deconv_size, in_features, out_features],
                stddev,
                name="wd" + str(size) + str(layer),
                trainable=trainable)
            bd = bias_variable([out_features],
                               name="bd" + str(size) + str(layer),
                               trainable=trainable)
            deconv = tf.nn.relu(deconv2d(in_node, wd, deconv_size) + bd)
            deconvsDict[which_up_conv] = deconv
            if trainable:
                weights.append(wd)
                biases.append(bd)
            in_node = deconv
            which_up_conv += 1
            size *= 2
        # Outputs...
        with tf.name_scope("Output{}".format(str(size))):
            if size < width or size > max_size:
                trainable = False
            else:
                trainable = True
            weight = weight_variable([1, 1, out_features, channels],
                                     stddev,
                                     name="wOut" + str(size) + str(layer),
                                     trainable=trainable)
            bias = bias_variable([channels],
                                 name="bOut" + str(size) + str(layer),
                                 trainable=trainable)
            conv = conv2d(in_node, weight, bias, tf.constant(1.0))
            output = tf.nn.relu(conv)
            if trainable:
                weights.append(weight)
                biases.append(bias)
            outputs.append(output)
            in_node = output
            convsDict["Output_" + str(size)] = output

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c) in enumerate(convs):
                tf.summary.image('summary_conv_%02d' % i, get_image_summary(c))
            for i in ({256, 512, 1024}):
                tf.summary.image(
                    'summary_output_' + str(i),
                    get_image_summary(convsDict["Output_" + str(i)]))
            for k in deconvsDict.keys():
                tf.summary.image('summary_deconv_%02d' % k,
                                 get_image_summary(deconvsDict[k]))

    variables = []
    for w in weights:
        variables.append(w)
    for b in biases:
        variables.append(b)

    return outputs, variables
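A hedged usage sketch for create_network follows. The shapes are assumptions (a 128x128 input so the hard-coded starting size of 128 matches the real image), and the module-level helpers used above (weight_variable, bias_variable, conv2d, deconv2d) are assumed to be in scope.

import tensorflow as tf

# Illustrative shapes only; not taken from the original repository.
x = tf.placeholder(tf.float32, shape=[None, 128, 128, 3], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# resolution controls how far the trainable output sizes go relative to the
# input (see max_size above); summaries are disabled here so that
# get_image_summary is not needed.
outputs, variables = create_network(x, keep_prob,
                                     resolution=3,
                                     features_root=16,
                                     channels=3,
                                     summaries=False)

# One upscaled prediction per size step; the last entry is the largest one.
final_output = outputs[-1]
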
Example #20
0
def parameter_efficient(in_channels=1, out_channels=2, start_filters=64, input_side_length=256, depth=4, res_blocks=2, filter_size=3, sparse_labels=True, batch_size=1, activation="cReLU", batch_norm=True):
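    """U-Net-style encoder/decoder built from residual blocks (NCHW internally).

    Returns the (inputs, logits, ground_truth, keep_prob, training) tensors
    needed to train or run the network; logits are returned in NHWC order.
    """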

    activation = str.lower(activation)
    if activation not in ["relu", "crelu"]:
        raise ValueError("activation must be \"ReLU\" or \"cReLU\".")

    pool_size = 2

    # Define inputs and helper functions #

    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32, shape=(batch_size, input_side_length, input_side_length, in_channels), name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32, shape=(batch_size, input_side_length, input_side_length), name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32, shape=(batch_size, input_side_length, input_side_length, out_channels), name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
        training = tf.placeholder(tf.bool, shape=[], name="training")

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # Contracting step: [res blocks -> max pool -> dropout]
    def step_down(name, input_, filter_size=3, res_blocks=2, keep_prob=1., training=False):

        with tf.variable_scope(name):
            
            with tf.variable_scope("res_block_0"):
                conv_out, tiled_input = layers.res_block(input_, filter_size, channel_multiplier=2, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
            
            for i in xrange(1, res_blocks):
                with tf.variable_scope("res_block_" + str(i)):
                    conv_out = layers.res_block(conv_out, filter_size, channel_multiplier=1, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
            
            conv_out = conv_out + tiled_input

            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            
            bottom_out = layers.dropout(pool_out, keep_prob)
            side_out = layers.dropout(conv_out, keep_prob)

        return bottom_out, side_out

    # Expanding step: [upconv and add -> res blocks -> dropout]
    def step_up(name, bottom_input, side_input, filter_size=3, res_blocks=2, keep_prob=1., training=False):

        with tf.variable_scope(name):
            added_input = layers.upconv_add_block(bottom_input, side_input, data_format="NCHW")

            conv_out = added_input
            for i in xrange(res_blocks):
                with tf.variable_scope("res_block_" + str(i)):
                    conv_out = layers.res_block(conv_out, filter_size, channel_multiplier=1, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
            
            result = layers.dropout(conv_out, keep_prob)

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        outputs = []

        with tf.variable_scope("step_0"):

            # Conv 1
            in_filters = in_channels
            out_filters = start_filters

            stddev = np.sqrt(2. / (filter_size**2 * in_filters))
            w = layers.weight_variable([filter_size, filter_size, in_filters, out_filters], stddev=stddev, name="weights")

            out_ = tf.nn.conv2d(network_input, w, [1, 1, 1, 1], padding="SAME", data_format="NCHW")
            out_ = out_ + layers.bias_variable([out_filters, 1, 1], name='biases')

            # Batch Norm 1
            if batch_norm:
                out_ = tf.layers.batch_normalization(out_, axis=1, momentum=0.999, center=True, scale=True, training=training, trainable=True, name="batch_norm", fused=True)

            in_filters = out_filters

            # concatenated ReLU
            if activation == "crelu":
                out_ = tf.concat([out_, -out_], axis=1)
                in_filters = 2 * in_filters
            out_ = tf.nn.relu(out_)

            # Conv 2
            stddev = np.sqrt(2. / (filter_size**2 * in_filters))
            w = layers.weight_variable([filter_size, filter_size, in_filters, out_filters], stddev=stddev, name="weights")

            out_ = tf.nn.conv2d(out_, w, [1, 1, 1, 1], padding="SAME", data_format="NCHW")
            out_ = out_ + layers.bias_variable([out_filters, 1, 1], name='biases')

            # Res Block 1
            conv_out = layers.res_block(out_, filter_size, channel_multiplier=1, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")

            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            
            bottom_out = layers.dropout(pool_out, keep_prob)
            side_out = layers.dropout(conv_out, keep_prob)

            outputs.append(side_out)

        # Build contracting path
        for i in xrange(1, depth):
            bottom_out, side_out = step_down('step_' + str(i), bottom_out, filter_size=filter_size, res_blocks=res_blocks, keep_prob=keep_prob, training=training)
            outputs.append(side_out)

    # Bottom of the network: residual blocks only (no pooling)
    with tf.variable_scope('step_' + str(depth)):

        with tf.variable_scope("res_block_0"):
            conv_out, tiled_input = layers.res_block(bottom_out, filter_size, channel_multiplier=2, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
        for i in xrange(1, res_blocks):
            with tf.variable_scope("res_block_" + str(i)):
                conv_out = layers.res_block(conv_out, filter_size, channel_multiplier=1, depthwise_multiplier=2, convolutions=2, training=training, activation=activation, batch_norm=batch_norm, data_format="NCHW")
        
        conv_out = conv_out + tiled_input
        current_tensor = layers.dropout(conv_out, keep_prob)

    with tf.variable_scope('expanding'):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in xrange(depth):
            current_tensor = step_up('step_' + str(depth + i + 1), current_tensor, outputs[i], filter_size=filter_size, res_blocks=res_blocks, keep_prob=keep_prob, training=training)
 
    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope('classification'):

        w = layers.weight_variable([1, 1, in_filters, out_channels], stddev, name='weights')
        b = layers.bias_variable([out_channels, 1, 1], name='biases')

        conv = tf.nn.conv2d(current_tensor, w, strides=[1, 1, 1, 1], padding="SAME", data_format="NCHW", name='conv')
        logits = conv + b

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob, training
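A hedged sketch of running the graph built by parameter_efficient; the zero-filled batch stands in for real data and only illustrates the feed shapes implied by the placeholders above.

import numpy as np
import tensorflow as tf

inputs, logits, ground_truth, keep_prob, training = parameter_efficient(
    in_channels=1, out_channels=2, input_side_length=256, batch_size=1)

# Softmax is applied outside the builder, as the classification comment notes.
probabilities = tf.nn.softmax(logits)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Dummy batch matching the 'inputs' placeholder shape.
    image_batch = np.zeros((1, 256, 256, 1), dtype=np.float32)

    # Inference: no dropout, batch norm in inference mode.
    predictions = sess.run(probabilities, feed_dict={inputs: image_batch,
                                                     keep_prob: 1.0,
                                                     training: False})
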
Example #21
0
def unet(in_channels=1, out_channels=2, start_filters=64, input_side_length=572, depth=4, convolutions=2, filter_size=3, sparse_labels=True, batch_size=1, padded_convolutions=False):
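    """Original U-Net graph builder (contracting and expanding paths, NCHW internally).

    Returns the (inputs, logits, ground_truth, keep_prob) tensors; logits are
    transposed back to NHWC before being returned.
    """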

    if not padded_convolutions:
        raise NotImplementedError("padded_convolutions=False has not yet been implemented!")

    pool_size = 2

    padding = "SAME" if padded_convolutions else "VALID"

    # Test whether input_side_length fits the depth, number of convolutions per step and filter_size
    output_side_length = input_side_length if padded_convolutions else get_output_side_length(input_side_length, depth, convolutions, filter_size, pool_size)

    # Define inputs and helper functions #
    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32, shape=(batch_size, input_side_length, input_side_length, in_channels), name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32, shape=(batch_size, output_side_length, output_side_length), name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32, shape=(batch_size, output_side_length, output_side_length, out_channels), name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # [conv -> conv -> max pool -> drop out] + parameter updates
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out

    # parameter updates + [upconv and concat -> drop out -> conv -> conv]
    def step_up(name, bottom_input, side_input):

        with tf.variable_scope(name):
            concat_out = layers.upconv_concat_block(bottom_input, side_input, data_format="NCHW")
            drop_out = layers.dropout(concat_out, keep_prob)
            result = layers.conv_block(drop_out, filter_size, channel_multiplier=0.5, convolutions=convolutions, padding=padding, data_format="NCHW")

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        # Set initial parameters
        outputs = []

        # Build contracting path
        with tf.variable_scope("step_0"):
            conv_out = layers.conv_block(network_input, filter_size, out_filters=start_filters, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            current_tensor = layers.dropout(pool_out, keep_prob)
            outputs.append(conv_out)

        for i in xrange(1, depth):
            current_tensor, conv_out = step_down("step_" + str(i), current_tensor)
            outputs.append(conv_out)

    # Bottom [conv -> conv]
    with tf.variable_scope("step_" + str(depth)):
        current_tensor = layers.conv_block(current_tensor, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")

    with tf.variable_scope("expanding"):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in xrange(depth):
            current_tensor = step_up("step_" + str(depth + i + 1), current_tensor, outputs[i])

    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope("classification"):

        weight = layers.weight_variable([1, 1, in_filters, out_channels], stddev, name="weights")
        bias = layers.bias_variable([out_channels, 1, 1], name="biases")

        conv = tf.nn.conv2d(current_tensor, weight, strides=[1, 1, 1, 1], padding="VALID", name="conv", data_format="NCHW")
        logits = conv + bias

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob
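The unet builder calls get_output_side_length, which is not shown in this example. Below is a sketch of the standard valid-convolution U-Net sizing arithmetic such a helper would have to implement (each unpadded convolution shrinks a side by filter_size - 1, pooling divides it, each up-convolution doubles it); this is a reconstruction under those assumptions, not the original helper.

def get_output_side_length(side_length, depth, convolutions, filter_size, pool_size):
    """Sketch: valid-convolution U-Net sizing, e.g. 572 -> 388 for the defaults above."""
    shrink = convolutions * (filter_size - 1)

    # Contracting path: unpadded convolutions, then pooling, at every step.
    for _ in range(depth):
        side_length -= shrink
        if side_length <= 0 or side_length % pool_size != 0:
            raise ValueError("input_side_length does not fit this architecture")
        side_length //= pool_size

    # Bottom convolutions.
    side_length -= shrink

    # Expanding path: up-convolution doubles the side, then unpadded convolutions.
    for _ in range(depth):
        side_length = side_length * pool_size - shrink

    if side_length <= 0:
        raise ValueError("input_side_length is too small for this depth")
    return side_length

For the classic settings used above (side length 572, depth 4, two 3x3 convolutions per step, pooling of 2), this arithmetic yields the familiar 388x388 output.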