Example #1
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):

    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class], stddev)
    bias = bias_variable([n_class])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
Example #2
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class], stddev)
    bias = bias_variable([n_class])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
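A minimal usage sketch for the example above, assuming TensorFlow 1.x graph mode and that the helper layers it calls (weight_variable, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, weight_variable_devonc, get_image_summary) are defined in the surrounding module; all concrete values are illustrative.

# Hypothetical call site; channel and class counts are illustrative.
import tensorflow as tf

channels, n_class = 1, 2
x = tf.placeholder(tf.float32, shape=[None, None, None, channels], name="x")
keep_prob = tf.placeholder(tf.float32, name="dropout_probability")

# logits has shape [?, nx', ny', n_class]; offset is the number of border
# pixels lost to the valid convolutions (in_size - size in the code above).
logits, variables, offset = create_conv_net(x, keep_prob, channels, n_class,
                                            layers=3, features_root=16)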
Example #3
def create_conv_net(x, keep_prob, channels, n_class, unet_kwargs):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param unet_kwargs: dict of optional hyperparameters:
        'layers' - number of layers in the net,
        'features_root' - number of features in the first layer,
        'filter_size' - size of the convolution filter,
        'pool_size' - size of the max pooling operation
    """
    layers = unet_kwargs.pop('layers', 3)
    features_root = unet_kwargs.pop('features_root', 16)
    filter_size = unet_kwargs.pop('filter_size', 3)
    pool_size = unet_kwargs.pop('pool_size', 2)

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    with tf.variable_scope('generator'):
        pools = OrderedDict()
        deconv = OrderedDict()
        dw_h_convs = OrderedDict()
        up_h_convs = OrderedDict()

        in_size = 1000
        size = in_size
        # down layers
        for layer in range(0, layers):
            features = 2**layer * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features], stddev)
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev)

            w2 = weight_variable(
                [filter_size, filter_size, features, features], stddev)
            b1 = bias_variable([features])
            b2 = bias_variable([features])

            conv1 = conv2d(in_node, w1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1 + b1)
            conv2 = conv2d(tmp_h_conv, w2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

        in_node = dw_h_convs[layers - 1]

        # up layers
        for layer in range(layers - 2, -1, -1):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))

            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features], stddev)
            bd = bias_variable([features // 2])
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2], stddev)
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev)
            b1 = bias_variable([features // 2])
            b2 = bias_variable([features // 2])

            conv1 = conv2d(h_deconv_concat, w1, keep_prob)
            h_conv = tf.nn.relu(conv1 + b1)
            conv2 = conv2d(h_conv, w2, keep_prob)
            in_node = tf.nn.relu(conv2 + b2)
            up_h_convs[layer] = in_node

            size *= 2
            size -= 4

        # Output Map
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class])
        conv = conv2d(in_node, weight, tf.constant(1.0))
        output_map = tf.nn.relu(conv + bias)
        up_h_convs["out"] = output_map

    return output_map, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                         'generator'), int(in_size - size)
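Example #3 passes the hyperparameters through a single unet_kwargs dict and collects the trainable variables from the 'generator' scope instead of tracking them by hand. A hedged call sketch, reusing the x and keep_prob placeholders from the previous sketch; the values are illustrative.

# Hypothetical call; the keys mirror the unet_kwargs.pop(...) defaults above.
unet_kwargs = {"layers": 3, "features_root": 16, "filter_size": 3, "pool_size": 2}
output_map, generator_vars, offset = create_conv_net(
    x, keep_prob, channels=1, n_class=2, unet_kwargs=unet_kwargs)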
Example #4
def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}".format(
            layers=layers,
            features=features_root,
            filter_size=filter_size,
            pool_size=pool_size))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2 ** layer * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))
            if layer == 0:
                w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name="w1")
            else:
                w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name="w1")

            w2 = weight_variable([filter_size, filter_size, features, features], stddev, name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2 ** (layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))

            wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name="w1")
            w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
            size -= 4

    # Output Map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram("dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations', up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
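Example #4 folds the bias into a four-argument conv2d wrapper and groups each block under a tf.name_scope; the wrapper itself is not shown. The following is only a plausible sketch of such a wrapper, an assumption rather than the original helper.

# Assumed shape of the four-argument conv2d helper used above: a VALID
# convolution, a bias add, and dropout. Not taken from the original module.
def conv2d(x, W, b, keep_prob_):
    with tf.name_scope("conv2d"):
        conv_2d = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
        conv_2d_b = tf.nn.bias_add(conv_2d, b)
        return tf.nn.dropout(conv_2d_b, keep_prob_)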
Example #5
def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}".format(
            layers=layers,
            features=features_root,
            filter_size=filter_size,
            pool_size=pool_size))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2 ** layer * features_root  # features per layer: 16, 32, 64 (powers of two times features_root)
            stddev = np.sqrt(2 / (filter_size ** 2 * features))
            if layer == 0:
                w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name="w1") #第一层直接channels变features [3,3,1,16]
            else:
                w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name="w1") #第二层和第三层channels变两倍

            w2 = weight_variable([filter_size, filter_size, features, features], stddev, name="w2") #w : [width, height, channels, kernel_nums]
            b1 = bias_variable([features], name="b1")  # x : [batch_size,image_height,image_width,channels]
            b2 = bias_variable([features], name="b2")  # b : [channels]

            conv1 = conv2d(in_node, w1, b1, keep_prob) # [?,?,?,16]
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 2 * 2 * (filter_size // 2) # valid conv
            if layer < layers - 1:  # all layers except the last one
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= pool_size  # the overall image size is halved

    in_node = dw_h_convs[layers - 1]  # use the deepest layer as input for the up path

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2 ** (layer + 1) * features_root  # features in reverse order: 64, 32, 16
            stddev = np.sqrt(2 / (filter_size ** 2 * features))

            wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name="wd") # [2,2,32,64]
            bd = bias_variable([features // 2], name="bd") # 32
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)  # upsample the deepest down layer, then combine it with the matching down layer
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)  # here layer is 1, i.e. the second-to-last down layer
            deconv[layer] = h_deconv_concat

            w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name="w1") # [3,3,64,32],这里才开始上采样,可以看到features // 2(变小)
            w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name="w2") # [3,3,32,32]
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= pool_size
            size -= 2 * 2 * (filter_size // 2) # valid conv

    # Output Map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev) #(1,1,16,2)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram("dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations', up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
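The size bookkeeping in Example #5 (size -= 2 * 2 * (filter_size // 2) for each pair of valid convolutions, size /= pool_size for each max pool, and the mirrored operations on the way up) is what produces the returned offset between input and output resolution. A standalone reproduction of that arithmetic with the default parameters:

# Reproduces the size bookkeeping above; with layers=3, filter_size=3 and
# pool_size=2 the reported offset is 40 pixels.
def unet_offset(layers=3, filter_size=3, pool_size=2, in_size=1000):
    size = in_size
    for layer in range(layers):
        size -= 2 * 2 * (filter_size // 2)  # two valid convolutions per down layer
        if layer < layers - 1:
            size /= pool_size               # max pooling halves the resolution
    for layer in range(layers - 2, -1, -1):
        size *= pool_size                   # deconvolution doubles the resolution
        size -= 2 * 2 * (filter_size // 2)  # two valid convolutions per up layer
    return int(in_size - size)

print(unet_offset())  # -> 40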
Example #6
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Placeholder for the input image
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size

    # number of pixels lost at each edge by one valid convolution, given the filter size
    pixelEdge = (filter_size - 1) / 2
    # total number of pixels removed each time a convolution filter is applied
    step = pixelEdge * 2
    # number of convolutions in one layer (not a parameter)
    nLayerofConvolution = 2
    # counters for the number of convolutions and max-pooling operations in the architecture
    nConvFilter = 0
    nMaxPooling = 0
    with tf.name_scope('DOWN_LAYER') as scope:
        # down layers
        for layer in range(0, layers):
            features = 2**layer * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features], stddev)
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev)

            w2 = weight_variable(
                [filter_size, filter_size, features, features], stddev)
            b1 = bias_variable([features])
            b2 = bias_variable([features])

            conv1 = conv2d(in_node, w1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1 + b1)
            conv2 = conv2d(tmp_h_conv, w2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

            nConvFilter += 2

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size = math.floor(size - (step * nLayerofConvolution))

            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size = math.floor(size / pool_size)
                nMaxPooling += 1

    in_node = dw_h_convs[layers - 1]

    with tf.name_scope('UP_LAYERS') as scope:
        # up layers
        for layer in range(layers - 2, -1, -1):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))

            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features], stddev)
            bd = bias_variable([features // 2])
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) +
                                  bd)  #up-conv 2x2
            h_deconv_concat = crop_and_concat(dw_h_convs[layer],
                                              h_deconv)  #copy and crop
            deconv[layer] = h_deconv_concat

            size = math.floor(size * pool_size)

            nMaxPooling += 1
            nConvFilter += 1

            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2], stddev)
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev)
            b1 = bias_variable([features // 2])
            b2 = bias_variable([features // 2])

            conv1 = conv2d(h_deconv_concat, w1, keep_prob)  #conv
            h_conv = tf.nn.relu(conv1 + b1)  #relu
            conv2 = conv2d(h_conv, w2, keep_prob)  #conv
            in_node = tf.nn.relu(conv2 + b2)  #relu
            up_h_convs[layer] = in_node

            nConvFilter += 2

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size = math.floor(size - (step * nLayerofConvolution))

    with tf.name_scope('OUTPUT') as scope:
        # Output Map
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class])
        conv = conv2d(in_node, weight, tf.constant(1.0))  #Last convolution 1x1
        output_map = tf.nn.relu(conv + bias)
        up_h_convs["out"] = output_map
        nMaxPooling += 1
        nConvFilter += 1

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    print("\n\tpixelEdge = " + str(pixelEdge))
    print("\tOffset pixels with input image = " + str(in_size - size) +
          " pixels")
    print("\tNumber of pixel to suppress : " + str((in_size - size) / 4) +
          " by edge")
    print('\tTotal Number of Convolution Filters : ' + str(nConvFilter))
    print('\tNumber of max-pooling & deconvolution : ' + str(nMaxPooling))

    return output_map, variables, int(in_size - size)
Example #7
def create_conv_net(x, keep_prob, channels, n_class, reuse=None, layers=3, features_root=8, filter_size=3, pool_size=2, summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param reuse: whether the variable scopes should be reused
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info("Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}".format(layers=layers,
                                                                                                           features=features_root,
                                                                                                           filter_size=filter_size,
                                                                                                           pool_size=pool_size))

    x_shp = [_s if _s is not None else -1 for _s in x.get_shape().as_list()]
    nx = x_shp[1]
    ny = x_shp[2]
    nz = x_shp[3]
    # Placeholder for the input image
    #nx = tf.shape(x)[1]
    #ny = tf.shape(x)[2]
    #nz = tf.shape(x)[3]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, nz, channels]))

    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    #pools = OrderedDict()
    #deconv = OrderedDict()
    dw_h_convs_r = OrderedDict()
    #up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    print("initial size", in_node.get_shape())
    # down layers
    with tf.device('/gpu:1'),tf.variable_scope("convdown",reuse=reuse):
        for layer in range(0, layers):
            with tf.variable_scope("level" + str(layer),reuse=reuse):
                features = 2**layer*features_root
                stddev = np.sqrt(2 / (filter_size**2 * features))

                if layer ==0:
                    with tf.variable_scope("real",reuse=reuse):
                        wr = weight_variable([filter_size, filter_size,filter_size, channels, features], stddev)
                        br = bias_variable([features],0.0)
                else:
                    with tf.variable_scope("real",reuse=reuse):
                        wr = weight_variable([filter_size, filter_size,filter_size, features//2, features], stddev)
                        br = bias_variable([features],0.0)

                #################################################
                if False:
                    conv_real = conv3d(in_node, wr, keep_prob, 1)
                    dw_h_convs_r[layer] = tf.nn.relu(conv_real + br)

                    size -= 4
                    if layer < layers - 1:
                        with tf.variable_scope("real2", reuse=reuse):
                            wr2 = weight_variable([filter_size, filter_size, filter_size, features, features], stddev)
                            br2 = bias_variable([features], 0.0)
                        in_node = tf.nn.relu(conv3d(dw_h_convs_r[layer], wr2, keep_prob, pool_size) + br2)
                        size /= 2

                #################################################
                if True:
                    conv_real = conv3d(in_node, wr, keep_prob, 1)
                    dw_h_convs_r[layer] = tf.nn.relu(conv_real + br)
                    size -= 4
                    if layer < layers - 1:
                        in_node = max_pool(dw_h_convs_r[layer], pool_size)
                        size /= 2

                print("output size-conv down", in_node.get_shape())


        in_node = dw_h_convs_r[layers-1]

    print("output size-last layer", (in_node.get_shape()))
    # up layers
    with tf.device('/gpu:0'), tf.variable_scope("convup",reuse=reuse):
        for layer in range(layers-2, -1, -1):
            with tf.variable_scope("level" + str(layer),reuse=reuse):
                features = 2**(layer+1)*features_root
                stddev = np.sqrt(2 / (filter_size**2 * features))

                with tf.variable_scope("real_deconv",reuse=reuse):
                    wdr = weight_variable_devonc([pool_size, pool_size, pool_size, features//2, features], stddev)
                    bdr = bias_variable([features//2],0.0)
                print("before size up", in_node.get_shape())
                o_size = in_node.get_shape()

                #conv_real = deconv3d(in_node_real, wdr, pool_size, o_size,batchN)-deconv3d(in_node_imag,wdi,pool_size,o_size,batchN)
                conv_real = deconv3d(in_node, wdr, pool_size, o_size, 1)
                conv_real = tf.nn.relu(conv_real + bdr)

                print("output size-conv up", conv_real.get_shape())
                #conv_real = crop_and_concat(dw_h_convs_r[layer], conv_real)
                conv_real = conv_real + dw_h_convs_r[layer]
                print("output size-crop_and_concat", conv_real.get_shape())

                with tf.variable_scope("real_conv",reuse=reuse):
                    wr = weight_variable([filter_size, filter_size, filter_size, conv_real.shape[4], features//2], stddev)
                    br = bias_variable([features//2],0.0)

                in_node= conv3d(conv_real, wr, keep_prob,1)
                in_node= tf.nn.relu(in_node+br)

                size *= 2
                size -= 4


    with tf.device('/gpu:1'),tf.variable_scope("last_unet",reuse=reuse):
        with tf.variable_scope("real",reuse=reuse):
            wr = weight_variable([1, 1,1, features_root, n_class], stddev)
            br = bias_variable([n_class])

            conv_real = conv3d(in_node, wr, keep_prob,1)
            in_node=conv_real+br

    print("output size-final", conv_real.get_shape())
    output_map = in_node + x_image

    #output = output * self.ksp_m_inv + layer * self.ksp_m

    #if summaries:
    #for i, (c1, c2) in enumerate(convs):
    #    tf.summary.image('summary_conv_%02d_01'%i, get_image_summary(c1))
    #    tf.summary.image('summary_conv_%02d_02'%i, get_image_summary(c2))

    #for k in pools.keys():
    #    tf.summary.image('summary_pool_%02d'%k, get_image_summary(pools[k]))

    #for k in deconv.keys():
    #    tf.summary.image('summary_deconv_concat_%02d'%k, get_image_summary(deconv[k]))

    #for k in dw_h_convs.keys():
    #    tf.summary.histogram("dw_convolution_%02d"%k + '/activations', dw_h_convs[k])

    #for k in up_h_convs.keys():
    #    tf.summary.histogram("up_convolution_%s"%k + '/activations', up_h_convs[k])

    return output_map, int(in_size - size)
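Example #7 is a 3-D, residual variant: the skip connection is an elementwise addition instead of crop_and_concat, the final activation is added back onto x_image, and the graph is split across two GPUs with reusable variable scopes. Because it reads static shapes via get_shape(), the input placeholder needs fixed spatial dimensions; a hedged call sketch with illustrative shapes:

# Hypothetical usage; the 64x64x64 volume and the single channel/class are
# illustrative, and n_class must equal channels for the residual addition.
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 64, 64, 64], name="x")
keep_prob = tf.placeholder(tf.float32, name="dropout_probability")

# The first call creates the variables; a second call with reuse=True would share them.
output_map, offset = create_conv_net(x, keep_prob, channels=1, n_class=1, reuse=None)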