Example #1
    def __init__(self, in_dim, out_dim, act_fn):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.act_fn = act_fn

        self.conv_1 = conv_block(self.in_dim, self.out_dim, act_fn)
        self.conv_2 = conv_block_3(self.out_dim, self.out_dim, act_fn)
        self.conv_3 = conv_block(self.out_dim, self.out_dim, act_fn)
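The helpers `conv_block` and `conv_block_3` are not shown in this excerpt. A minimal sketch of what they typically look like in PyTorch U-Net implementations; the conv-BN-activation layout and the reading of the "_3" suffix are assumptions, not the original definitions:

import torch.nn as nn

def conv_block(in_dim, out_dim, act_fn):
    # A single 3x3 convolution followed by batch norm and the given activation.
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_dim),
        act_fn,
    )

def conv_block_3(in_dim, out_dim, act_fn):
    # Three stacked conv blocks; one common reading of the "_3" suffix.
    return nn.Sequential(
        conv_block(in_dim, out_dim, act_fn),
        conv_block(out_dim, out_dim, act_fn),
        conv_block(out_dim, out_dim, act_fn),
    )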
Example #2
    def step_up(name, bottom_input, side_input):

        with tf.variable_scope(name):
            concat_out = layers.upconv_concat_block(bottom_input, side_input, data_format="NCHW")
            drop_out = layers.dropout(concat_out, keep_prob)
            result = layers.conv_block(drop_out, filter_size, channel_multiplier=0.5, convolutions=convolutions, padding=padding, data_format="NCHW")

        return result
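This excerpt is an inner helper from the full `unet` builder in Example #5; it closes over `keep_prob`, `filter_size`, `convolutions` and `padding`. The project-specific `layers.upconv_concat_block` is not shown. A hypothetical minimal TF1 version, assuming a 2x2 transposed convolution that halves the channels while doubling the spatial size before concatenating the skip connection:

import tensorflow as tf

def upconv_concat_block(bottom_input, side_input, data_format="NCHW"):
    # Double the spatial resolution while halving the channel count,
    # then concatenate the skip connection along the channel axis (1 for NCHW).
    channels = bottom_input.shape.as_list()[1]
    up = tf.layers.conv2d_transpose(bottom_input, channels // 2,
                                    kernel_size=2, strides=2,
                                    data_format="channels_first")
    return tf.concat([up, side_input], axis=1)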
Example #3
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out
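Like Example #2, this is an inner helper from Example #5's `unet`. The `layers.max_pool` and `layers.dropout` wrappers it uses are presumably thin layers over the TF1 primitives; plausible minimal versions (assumptions; the real `layers` module may differ):

import tensorflow as tf

def max_pool(x, pool_size, data_format="NCHW"):
    # Non-overlapping pooling; for NCHW the spatial dims are axes 2 and 3.
    ksize = [1, 1, pool_size, pool_size]
    return tf.nn.max_pool(x, ksize=ksize, strides=ksize,
                          padding="VALID", data_format=data_format)

def dropout(x, keep_prob):
    return tf.nn.dropout(x, keep_prob)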
Example #4
    def __init__(self, x_dim=3, hid_dim=64, z_dim=64, type=0):
        super().__init__()
        if type == 0:
            self.encoder = nn.Sequential(
                conv_block(x_dim, hid_dim),
                conv_block(hid_dim, hid_dim),
                conv_block(hid_dim, hid_dim),
                conv_block(hid_dim, z_dim),
            )
        elif type == 1:
            self.encoder = nn.Sequential(
                conv_block(x_dim, hid_dim),
                conv_block(hid_dim, hid_dim),
                conv_block(hid_dim, z_dim),
                conv_block_downsample(hid_dim, hid_dim),
            )
        else:
            self.encoder = nn.Sequential(
                conv_block(x_dim, hid_dim),
                conv_block(hid_dim, hid_dim),
                conv_block_downsample(hid_dim, hid_dim),
                conv_block_downsample(hid_dim, hid_dim),
            )
        self.out_channels = 1600
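The hard-coded `self.out_channels = 1600` matches the usual few-shot (prototypical-network style) encoder: with 84x84 inputs and four blocks that each halve the spatial side (84 -> 42 -> 21 -> 10 -> 5), the flattened feature map is 64 * 5 * 5 = 1600. A sanity-check sketch, assuming `conv_block` is the common conv-BN-ReLU-maxpool unit (not the original definition):

import torch
import torch.nn as nn

def conv_block(in_channels, out_channels):
    # 3x3 conv, batch norm, ReLU, then a 2x2 max pool halving each side.
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    )

encoder = nn.Sequential(
    conv_block(3, 64),
    conv_block(64, 64),
    conv_block(64, 64),
    conv_block(64, 64),
)
x = torch.randn(1, 3, 84, 84)
print(encoder(x).flatten(1).shape)  # torch.Size([1, 1600])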
Example #5
def unet(in_channels=1, out_channels=2, start_filters=64, input_side_length=572, depth=4, convolutions=2, filter_size=3, sparse_labels=True, batch_size=1, padded_convolutions=False):

    if not padded_convolutions:
        raise NotImplementedError("padded_convolutions=False has not yet been implemented!")

    pool_size = 2

    padding = "SAME" if padded_convolutions else "VALID"

    # Test whether input_side_length fits the depth, number of convolutions per step and filter_size
    output_side_length = input_side_length if padded_convolutions else get_output_side_length(input_side_length, depth, convolutions, filter_size, pool_size)

    # Define inputs and helper functions #
    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32, shape=(batch_size, input_side_length, input_side_length, in_channels), name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32, shape=(batch_size, output_side_length, output_side_length), name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32, shape=(batch_size, output_side_length, output_side_length, out_channels), name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # [conv -> conv -> max pool -> drop out] + parameter updates
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out

    # parameter updates + [upconv and concat -> drop out -> conv -> conv]
    def step_up(name, bottom_input, side_input):

        with tf.variable_scope(name):
            concat_out = layers.upconv_concat_block(bottom_input, side_input, data_format="NCHW")
            drop_out = layers.dropout(concat_out, keep_prob)
            result = layers.conv_block(drop_out, filter_size, channel_multiplier=0.5, convolutions=convolutions, padding=padding, data_format="NCHW")

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        # Set initial parameters
        outputs = []

        # Build contracting path
        with tf.variable_scope("step_0"):
            conv_out = layers.conv_block(network_input, filter_size, out_filters=start_filters, convolutions=convolutions, padding=padding, data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            current_tensor = layers.dropout(pool_out, keep_prob)
            outputs.append(conv_out)

        for i in range(1, depth):
            current_tensor, conv_out = step_down("step_" + str(i), current_tensor)
            outputs.append(conv_out)

    # Bottom [conv -> conv]
    with tf.variable_scope("step_" + str(depth)):
        current_tensor = layers.conv_block(current_tensor, filter_size, channel_multiplier=2, convolutions=convolutions, padding=padding, data_format="NCHW")

    with tf.variable_scope("expanding"):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in range(depth):
            current_tensor = step_up("step_" + str(depth + i + 1), current_tensor, outputs[i])

    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope("classification"):

        weight = layers.weight_variable([1, 1, in_filters, out_channels], stddev, name="weights")
        bias = layers.bias_variable([out_channels, 1, 1], name="biases")

        conv = tf.nn.conv2d(current_tensor, weight, strides=[1, 1, 1, 1], padding="VALID", name="conv", data_format="NCHW")
        logits = conv + bias

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob
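`get_output_side_length` is not shown in the excerpt. For unpadded (VALID) convolutions it would implement the standard U-Net size arithmetic: every convolution shaves `filter_size - 1` pixels, every pooling must divide the side evenly, and every up-convolution doubles it. A hypothetical reconstruction (yielding, e.g., 572 -> 388 for the classic depth=4, convolutions=2, filter_size=3 configuration):

def get_output_side_length(side_length, depth, convolutions, filter_size, pool_size):
    # Contracting path: convolutions shrink the side, pooling divides it.
    for _ in range(depth):
        side_length -= convolutions * (filter_size - 1)
        if side_length <= 0 or side_length % pool_size != 0:
            raise ValueError("input_side_length does not fit the architecture")
        side_length //= pool_size
    # Bottom convolutions.
    side_length -= convolutions * (filter_size - 1)
    # Expanding path: up-convolutions double the side, convolutions shrink it.
    for _ in range(depth):
        side_length = side_length * pool_size - convolutions * (filter_size - 1)
        if side_length <= 0:
            raise ValueError("input_side_length does not fit the architecture")
    return side_length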
Example #6
def unet(in_channels=1,
         out_channels=2,
         start_filters=64,
         side_length=572,
         depth=4,
         convolutions=2,
         filter_size=3,
         sparse_labels=True,
         batch_size=1):
    """
    Creates the graph for the standard U-Net and sets up the appropriate input and output placeholders.

    Parameters
    ----------
    in_channels: int
        The depth of the input.
    out_channels: int
        The number of classes, i.e. the channel depth of the output.
    start_filters : int
        The number of filters in the first convolution.
    side_length: int
        The side length of the square input.
    depth: int
        The depth of the U-part of the network. This is equal to the number of max-pooling layers.
    convolutions: int
        The number of convolutions in between max-pooling layers on the down-path and in between up-convolutions on the up-path.
    filter_size: int
        The width and height of the filter. The receptive field.
    sparse_labels: bool
        If true, labels are given as one integer per pixel, denoting the class that pixel belongs to. If false, labels are one-hot encoded.
    batch_size: int
        The training batch size.

    Returns
    -------
    inputs : TF tensor
        The network input.
    logits: TF tensor
        The network output before SoftMax.
    ground_truth: TF tensor
        The desired output from the ground truth.
    keep_prob: TF float
        The TF placeholder holding the keep probability for the dropout layers.
    """

    pool_size = 2
    padding = "SAME"

    # Define inputs and helper functions #
    with tf.variable_scope('inputs'):
        inputs = tf.placeholder(tf.float32,
                                shape=(batch_size, side_length, side_length,
                                       in_channels),
                                name='inputs')
        if sparse_labels:
            ground_truth = tf.placeholder(tf.int32,
                                          shape=(batch_size, side_length,
                                                 side_length),
                                          name='labels')
        else:
            ground_truth = tf.placeholder(tf.float32,
                                          shape=(batch_size, side_length,
                                                 side_length, out_channels),
                                          name='labels')
        keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')

        network_input = tf.transpose(inputs, perm=[0, 3, 1, 2])

    # [conv -> conv -> max pool -> drop out] + parameter updates
    def step_down(name, _input):

        with tf.variable_scope(name):
            conv_out = layers.conv_block(_input,
                                         filter_size,
                                         channel_multiplier=2,
                                         convolutions=convolutions,
                                         padding=padding,
                                         data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            result = layers.dropout(pool_out, keep_prob)

        return result, conv_out

    # parameter updates + [upconv and concat -> drop out -> conv -> conv]
    def step_up(name, bottom_input, side_input):

        with tf.variable_scope(name):
            concat_out = layers.upconv_concat_block(bottom_input,
                                                    side_input,
                                                    data_format="NCHW")
            drop_out = layers.dropout(concat_out, keep_prob)
            result = layers.conv_block(drop_out,
                                       filter_size,
                                       channel_multiplier=0.5,
                                       convolutions=convolutions,
                                       padding=padding,
                                       data_format="NCHW")

        return result

    # Build the network #

    with tf.variable_scope('contracting'):

        # Set initial parameters
        outputs = []

        # Build contracting path
        with tf.variable_scope("step_0"):
            conv_out = layers.conv_block(network_input,
                                         filter_size,
                                         out_filters=start_filters,
                                         convolutions=convolutions,
                                         padding=padding,
                                         data_format="NCHW")
            pool_out = layers.max_pool(conv_out, pool_size, data_format="NCHW")
            current_tensor = layers.dropout(pool_out, keep_prob)
            outputs.append(conv_out)

        for i in range(1, depth):
            current_tensor, conv_out = step_down("step_" + str(i),
                                                 current_tensor)
            outputs.append(conv_out)

    # Bottom [conv -> conv]
    with tf.variable_scope("step_" + str(depth)):
        current_tensor = layers.conv_block(current_tensor,
                                           filter_size,
                                           channel_multiplier=2,
                                           convolutions=convolutions,
                                           padding=padding,
                                           data_format="NCHW")

    with tf.variable_scope("expanding"):

        # Set initial parameter
        outputs.reverse()

        # Build expanding path
        for i in range(depth):
            current_tensor = step_up("step_" + str(depth + i + 1),
                                     current_tensor, outputs[i])

    # Last layer is a 1x1 convolution to get the predictions
    # We don't want an activation function for this one (softmax will be applied later), so we're doing it manually
    in_filters = current_tensor.shape.as_list()[1]
    stddev = np.sqrt(2. / in_filters)

    with tf.variable_scope("classification"):

        weight = layers.weight_variable([1, 1, in_filters, out_channels],
                                        stddev,
                                        name="weights")
        bias = layers.bias_variable([out_channels, 1, 1], name="biases")

        conv = tf.nn.conv2d(current_tensor,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding="VALID",
                            name="conv",
                            data_format="NCHW")
        logits = conv + bias

        logits = tf.transpose(logits, perm=[0, 2, 3, 1])

    return inputs, logits, ground_truth, keep_prob
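A minimal usage sketch for this graph-building function under TF1. Assumptions: the project's `layers` helpers are importable, and the side length is divisible by 2**depth (e.g. 576) so pooling and the skip-connection concatenations line up; the NCHW data format generally requires a GPU build of TensorFlow:

import numpy as np
import tensorflow as tf

tf.reset_default_graph()
inputs, logits, ground_truth, keep_prob = unet(side_length=576, batch_size=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Dropout disabled for a plain forward pass: keep_prob=1.0.
    batch = np.zeros((1, 576, 576, 1), dtype=np.float32)
    out = sess.run(logits, feed_dict={inputs: batch, keep_prob: 1.0})
    print(out.shape)  # (1, 576, 576, 2) with SAME padding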