Example #1
def get_model(X, batch_size, image_dimension):

    input_shape = (batch_size, 3, image_dimension, image_dimension)
    all_parameters = []

    #############################################
    # a first convolution with 32 (3, 3) filters
    output, output_test, params, output_shape = convolutional(
        X, X, input_shape, 32, (3, 3))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # a second convolution with 32 (5, 5) filters
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 32, (5, 5))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # MLP first layer

    output = output.flatten(2)
    output_test = output_test.flatten(2)

    output, output_test, params, output_shape = linear(
        output, output_test,
        (output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]),
        500)
    all_parameters += params

    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # MLP second layer

    output, output_test, params, output_shape = linear(output, output_test,
                                                       output_shape, 1)
    all_parameters += params

    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'sigmoid')

    #
    return output, output_test, all_parameters
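A minimal usage sketch for the model above. The separate train/test outputs and the .flatten(2) call suggest Theano-style symbolic helpers, so the wiring below assumes Theano; X, y, lr, and the plain-SGD updates are illustrative, not part of the original code.

# Hypothetical Theano wiring around get_model (the convolutional / maxpool /
# activation / linear helpers are assumed to return symbolic expressions).
import theano
import theano.tensor as T

batch_size, image_dimension = 64, 32          # illustrative sizes
X = T.tensor4('X')                            # (batch, 3, H, W) input batch
y = T.matrix('y')                             # (batch, 1) binary targets

output, output_test, params = get_model(X, batch_size, image_dimension)

loss = T.nnet.binary_crossentropy(output, y).mean()
grads = T.grad(loss, params)
lr = 0.01
updates = [(p, p - lr * g) for p, g in zip(params, grads)]

train_fn = theano.function([X, y], loss, updates=updates)
predict_fn = theano.function([X], output_test)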
Example #2
    def __init__(self, input_nc, output_nc, ngf=32):
        super().__init__()
        self.in_dim = input_nc
        self.out_dim = ngf
        self.final_out_dim = output_nc
        act_fn = nn.LeakyReLU(0.2, inplace=True)
        act_fn_2 = nn.ReLU()

        # Encoder
        self.down_1 = CoordConv_residual_conv(self.in_dim, self.out_dim,
                                              act_fn)
        self.pool_1 = maxpool()
        self.down_2 = CoordConv_residual_conv(self.out_dim, self.out_dim * 2,
                                              act_fn)
        self.pool_2 = maxpool()
        self.down_3 = CoordConv_residual_conv(self.out_dim * 2,
                                              self.out_dim * 4, act_fn)
        self.pool_3 = maxpool()
        self.down_4 = CoordConv_residual_conv(self.out_dim * 4,
                                              self.out_dim * 8, act_fn)
        self.pool_4 = maxpool()

        # Bridge between Encoder-Decoder
        self.bridge = CoordConv_residual_conv(self.out_dim * 8,
                                              self.out_dim * 16, act_fn)

        # Decoder
        self.deconv_1 = conv_decod_block(self.out_dim * 16, self.out_dim * 8,
                                         act_fn_2)
        self.up_1 = CoordConv_residual_conv(self.out_dim * 8, self.out_dim * 8,
                                            act_fn_2)
        self.deconv_2 = conv_decod_block(self.out_dim * 8, self.out_dim * 4,
                                         act_fn_2)
        self.up_2 = CoordConv_residual_conv(self.out_dim * 4, self.out_dim * 4,
                                            act_fn_2)
        self.deconv_3 = conv_decod_block(self.out_dim * 4, self.out_dim * 2,
                                         act_fn_2)
        self.up_3 = CoordConv_residual_conv(self.out_dim * 2, self.out_dim * 2,
                                            act_fn_2)
        self.deconv_4 = conv_decod_block(self.out_dim * 2, self.out_dim,
                                         act_fn_2)
        self.up_4 = CoordConv_residual_conv(self.out_dim, self.out_dim,
                                            act_fn_2)

        self.out = nn.Conv2d(self.out_dim,
                             self.final_out_dim,
                             kernel_size=3,
                             stride=1,
                             padding=1)

        # Params initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        print(f"Initialized {self.__class__.__name__} succesfully")
Example #3
def get_model(X, batch_size, image_dimension):

	input_shape = (batch_size, 3, image_dimension, image_dimension)
	all_parameters = []

	#############################################
	# a first convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(X, X, input_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# a second convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	
	#############################################
	# a third convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# MLP first layer

	output = output.flatten(2)
	output_test = output_test.flatten(2)
	
	output, output_test, params, output_shape = linear(output, output_test, (output_shape[0], output_shape[1]*output_shape[2]*output_shape[3]), 500)
	all_parameters += params

	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# MLP second layer

	output, output_test, params, output_shape = linear(output, output_test, output_shape, 1)
	all_parameters += params

	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'sigmoid')

	#
	return output, output_test, all_parameters
Example #4
    def __call__(self, x):
        x = tf.cast(x, dtype=tf.float32)
        self.conv1 = ll.conv2dx(x, self.model_weights[0], 1)  # 1st Layer
        self.conv1 = ll.conv2dx(self.conv1, self.model_weights[1], 1)
        self.conv1 = ll.conv2dx(self.conv1, self.model_weights[2], 1)
        self.pool1 = ll.maxpool(self.conv1, 2, 2)

        self.conv2 = ll.conv2dx(self.pool1, self.model_weights[3],
                                1)  # 2nd Layer
        self.conv2 = ll.conv2dx(self.conv2, self.model_weights[4], 1)
        self.conv2 = ll.conv2dx(self.conv2, self.model_weights[5], 1)
        self.pool2 = ll.maxpool(self.conv2, 2, 2)

        self.conv3 = ll.conv2dx(self.pool2, self.model_weights[6],
                                1)  # 3rd Layer
        self.conv3 = ll.conv2dx(self.conv3, self.model_weights[7], 1)
        self.conv3 = ll.conv2dx(self.conv3, self.model_weights[8], 1)
        self.pool3 = ll.maxpool(self.conv3, 2, 2)

        self.conv4 = ll.conv2dx(self.pool3, self.model_weights[9],
                                1)  # 4th Layer
        self.conv4 = ll.conv2dx(self.conv4, self.model_weights[10], 1)
        self.conv4 = ll.conv2dx(self.conv4, self.model_weights[11], 1)
        self.pool4 = ll.maxpool(self.conv4, 2, 2)

        self.conv5 = ll.conv2dx(self.pool4, self.model_weights[12],
                                1)  # 5th Layer
        self.conv5 = ll.conv2dx(self.conv5, self.model_weights[13], 1)
        self.conv5 = ll.conv2dx(self.conv5, self.model_weights[14], 1)
        self.pool5 = ll.maxpool(self.conv5, 2, 2)

        self.conv6 = ll.conv2dx(self.pool5, self.model_weights[15],
                                1)  # 6th Layer
        self.conv6 = ll.conv2dx(self.conv6, self.model_weights[16], 1)
        self.conv6 = ll.conv2dx(self.conv6, self.model_weights[17], 1)
        self.pool6 = ll.maxpool(self.conv6, 2, 2)

        self.flatten_layer = tf.reshape(self.pool6,
                                        shape=(tf.shape(self.pool6)[0],
                                               -1))  # flatten

        self.dense1 = ll.dense(self.flatten_layer, self.model_weights[18],
                               self.dropout_rate)
        self.dense2 = ll.dense(self.dense1, self.model_weights[19],
                               self.dropout_rate)
        self.dense3 = ll.dense(self.dense2, self.model_weights[20],
                               self.dropout_rate)
        self.dense4 = ll.dense(self.dense3, self.model_weights[21],
                               self.dropout_rate)
        self.dense5 = ll.dense(self.dense4, self.model_weights[22],
                               self.dropout_rate)
        self.dense6 = tf.matmul(self.dense5, self.model_weights[23])

        return tf.nn.softmax(self.dense6)
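A hedged usage sketch for the __call__ above, assuming NHWC inputs and an already-constructed instance model whose model_weights (24 weight tensors) and dropout_rate were populated elsewhere; the batch and image sizes are illustrative.

# Hypothetical call; `model` is an instance of the class above.
import tensorflow as tf

# Six stride-2 maxpools reduce height/width by 64x, so the spatial size
# should be a multiple of 64; 128x128 is illustrative only.
images = tf.random.uniform((8, 128, 128, 3), dtype=tf.float32)
probs = model(images)                      # softmax class probabilities
predictions = tf.argmax(probs, axis=-1)    # predicted class per image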
Example #5
def get_model(X, batch_size, image_dimension):

	input_shape = (batch_size, 3, image_dimension[0], image_dimension[1])
	all_parameters = []
	acc_parameters = []

	#############################################
	# a first convolution with 64 (3, 3) filters
	output, output_test, params, output_shape = convolutional(X, X, input_shape, 64, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# a second convolution with 128 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 128, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	
	#############################################
	# 2 convolutional layers with 256 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 256, (3, 3))
	all_parameters += params
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 256, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# Fully connected
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 1024, (1, 1))
	all_parameters += params
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 1024, (1, 1))
	all_parameters += params

	# maxpool with size=(4, 4) and fully connected
	output, output_test, params, output_shape = avgpool(output, output_test, output_shape, (4, 4))
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 10, (1, 1))
	all_parameters += params

	output, output_test, params, output_shape, cacc_parameters = batch_norm(output, output_test, output_shape)
	acc_parameters += cacc_parameters
	all_parameters += params

	# softmax
	output = multi_dim_softmax(output)
	output_test = multi_dim_softmax(output_test)

	#
	return output, output_test, all_parameters, acc_parameters
Example #6
def build(color_inputs, num_classes, is_training):
    """
    Build unet network:
    ----------
    Args:
        color_inputs: Tensor, [batch_size, height, width, 3]
        num_classes: Integer, number of segmentation (annotation) labels
        is_training: Boolean, in training mode or not (for dropout & bn)
    Returns:
        logits: Tensor, predicted annotated image flattened 
                              [batch_size * height * width,  num_classes]
    """

    dropout_keep_prob = tf.where(is_training, 0.2, 1.0)

    # Encoder Section
    # Block 1
    color_conv1_1 = layers.conv_btn(color_inputs,  [3, 3], 64, 'conv1_1', is_training = is_training)
    color_conv1_2 = layers.conv_btn(color_conv1_1, [3, 3], 64, 'conv1_2', is_training = is_training)
    color_pool1   = layers.maxpool(color_conv1_2, [2, 2],  'pool1')

    # Block 2
    color_conv2_1 = layers.conv_btn(color_pool1,   [3, 3], 128, 'conv2_1', is_training = is_training)
    color_conv2_2 = layers.conv_btn(color_conv2_1, [3, 3], 128, 'conv2_2', is_training = is_training)
    color_pool2   = layers.maxpool(color_conv2_2, [2, 2],   'pool2')

    # Block 3
    color_conv3_1 = layers.conv_btn(color_pool2,   [3, 3], 256, 'conv3_1', is_training = is_training)
    color_conv3_2 = layers.conv_btn(color_conv3_1, [3, 3], 256, 'conv3_2', is_training = is_training)
    color_pool3   = layers.maxpool(color_conv3_2, [2, 2],   'pool3')
    color_drop3   = layers.dropout(color_pool3, dropout_keep_prob, 'drop3')

    # Block 4
    color_conv4_1 = layers.conv_btn(color_drop3,   [3, 3], 512, 'conv4_1', is_training = is_training)
    color_conv4_2 = layers.conv_btn(color_conv4_1, [3, 3], 512, 'conv4_2', is_training = is_training)
    color_pool4   = layers.maxpool(color_conv4_2, [2, 2],   'pool4')
    color_drop4   = layers.dropout(color_pool4, dropout_keep_prob, 'drop4')

    # Block 5
    color_conv5_1 = layers.conv_btn(color_drop4,   [3, 3], 1024, 'conv5_1', is_training = is_training)
    color_conv5_2 = layers.conv_btn(color_conv5_1, [3, 3], 1024, 'conv5_2', is_training = is_training)
    color_drop5   = layers.dropout(color_conv5_2, dropout_keep_prob, 'drop5')

    # Decoder Section
    # Block 1
    upsample6     = layers.deconv_upsample(color_drop5, 2,  'upsample6')
    concat6       = layers.concat(upsample6, color_conv4_2, 'concat6')
    color_conv6_1 = layers.conv_btn(concat6,       [3, 3], 512, 'conv6_1', is_training = is_training)
    color_conv6_2 = layers.conv_btn(color_conv6_1, [3, 3], 512, 'conv6_2', is_training = is_training)
    color_drop6   = layers.dropout(color_conv6_2, dropout_keep_prob, 'drop6')

    # Block 2
    upsample7     = layers.deconv_upsample(color_drop6, 2,  'upsample7')
    concat7       = layers.concat(upsample7, color_conv3_2, 'concat7')
    color_conv7_1 = layers.conv_btn(concat7,       [3, 3], 256, 'conv7_1', is_training = is_training)
    color_conv7_2 = layers.conv_btn(color_conv7_1, [3, 3], 256, 'conv7_2', is_training = is_training)
    color_drop7   = layers.dropout(color_conv7_2, dropout_keep_prob, 'drop7')

    # Block 3
    upsample8     = layers.deconv_upsample(color_drop7, 2,  'upsample8')
    concat8       = layers.concat(upsample8, color_conv2_2, 'concat8')
    color_conv8_1 = layers.conv_btn(concat8,       [3, 3], 128, 'conv8_1', is_training = is_training)
    color_conv8_2 = layers.conv_btn(color_conv8_1, [3, 3], 128, 'conv8_2', is_training = is_training)

    # Block 4
    upsample9     = layers.deconv_upsample(color_conv8_2, 2, 'upsample9')
    concat9       = layers.concat(upsample9, color_conv1_2,  'concat9')
    color_conv9_1 = layers.conv_btn(concat9,       [3, 3], 64,   'conv9_1', is_training = is_training)
    color_conv9_2 = layers.conv_btn(color_conv9_1, [3, 3], 64,   'conv9_2', is_training = is_training)

    # Block 5
    score  = layers.conv(color_conv9_2, [1, 1], num_classes, 'score', activation_fn = None)
    logits = tf.reshape(score, (-1, num_classes))

    return logits
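build returns per-pixel logits flattened to [batch_size * height * width, num_classes], so a typical TF1-style training head on top of it could look like the sketch below; the placeholder shapes, integer label format, and Adam optimizer are assumptions, not taken from the original code.

# Hypothetical TF1-style loss/optimizer on top of the flattened logits.
import tensorflow as tf

height, width, num_classes = 256, 256, 21        # illustrative sizes
color_inputs = tf.placeholder(tf.float32, [None, height, width, 3])
labels = tf.placeholder(tf.int32, [None, height, width])
is_training = tf.placeholder(tf.bool, [])

logits = build(color_inputs, num_classes, is_training)

flat_labels = tf.reshape(labels, [-1])           # [batch * height * width]
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=flat_labels, logits=logits)
loss = tf.reduce_mean(cross_entropy)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)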
Example #7
def build_30s(color_inputs, num_classes, is_training):
    """
    Build unet network:
    ----------
    Args:
        color_inputs: Tensor, [batch_size, length, 3]
        num_classes: Integer, number of segmentation (annotation) labels
        is_training: Boolean, in training mode or not (for dropout & bn)
    Returns:
        logits: Tensor, predicted annotated image flattened 
                              [batch_size * length,  num_classes]
    """

    dropout_keep_prob = tf.where(is_training, 0.2, 1.0)

    # Encoder Section
    # Block 1
    # color_conv1_1 = layers.conv_btn(color_inputs,  [3, 3], 64, 'conv1_1', is_training = is_training)

    color_conv1_1 = layers.conv_btn1(color_inputs,
                                     3,
                                     32,
                                     'conv1_1',
                                     is_training=is_training)
    #layers.conv1(current_layer, c, ksize, stride=2, scope='conv{}'.format(i + 1), padding='SAME')
    color_conv1_2 = layers.conv_btn1(color_conv1_1,
                                     3,
                                     32,
                                     'conv1_2',
                                     is_training=is_training)
    color_pool1 = layers.maxpool(color_conv1_2, 4, 'pool1')

    # Block 2
    color_conv2_1 = layers.conv_btn1(color_pool1,
                                     3,
                                     32,
                                     'conv2_1',
                                     is_training=is_training)
    color_conv2_2 = layers.conv_btn1(color_conv2_1,
                                     3,
                                     32,
                                     'conv2_2',
                                     is_training=is_training)
    color_pool2 = layers.maxpool(color_conv2_2, 4, 'pool2')
    # Block 3
    color_conv3_1 = layers.conv_btn1(color_pool2,
                                     3,
                                     64,
                                     'conv3_1',
                                     is_training=is_training)
    color_conv3_2 = layers.conv_btn1(color_conv3_1,
                                     3,
                                     64,
                                     'conv3_2',
                                     is_training=is_training)
    color_pool3 = layers.maxpool(color_conv3_2, 4, 'pool3')
    color_drop3 = layers.dropout(color_pool3, dropout_keep_prob, 'drop3')
    # Block 4
    color_conv4_1 = layers.conv_btn1(color_drop3,
                                     3,
                                     64,
                                     'conv4_1',
                                     is_training=is_training)
    color_conv4_2 = layers.conv_btn1(color_conv4_1,
                                     3,
                                     64,
                                     'conv4_2',
                                     is_training=is_training)
    color_pool4 = layers.maxpool(color_conv4_2, 4, 'pool4')
    color_drop4 = layers.dropout(color_pool4, dropout_keep_prob, 'drop4')

    # Block 5
    color_conv5_1 = layers.conv_btn1(color_drop4,
                                     3,
                                     128,
                                     'conv5_1',
                                     is_training=is_training)
    color_conv5_2 = layers.conv_btn1(color_conv5_1,
                                     3,
                                     128,
                                     'conv5_2',
                                     is_training=is_training)
    color_drop5 = layers.dropout(color_conv5_2, dropout_keep_prob, 'drop5')

    # Decoder Section
    # Block 1

    upsample61 = layers.deconv_upsample(color_drop5, 4, 'upsample6')
    upsample61 = Cropping1D(cropping=((0, 1)))(upsample61)
    concat6 = layers.concat(upsample61, color_conv4_2, 'concat6')
    color_conv6_1 = layers.conv_btn1(concat6,
                                     3,
                                     128,
                                     'conv6_1',
                                     is_training=is_training)
    # color_conv6_2 = layers.conv_btn1(color_conv6_1, 6, 128, 'conv6_2', is_training = is_training)
    color_drop6 = layers.dropout(color_conv6_1, dropout_keep_prob, 'drop6')
    # Block 2
    upsample7 = layers.deconv_upsample(color_drop6, 4, 'upsample7')
    # upsample7 = Cropping1D(cropping=((0, 1)))(upsample7)
    concat7 = layers.concat(upsample7, color_conv3_2, 'concat7')
    color_conv7_1 = layers.conv_btn1(concat7,
                                     3,
                                     64,
                                     'conv7_1',
                                     is_training=is_training)
    # color_conv7_2 = layers.conv_btn1(color_conv7_1, 6, 64, 'conv7_1', is_training = is_training)
    color_drop7 = layers.dropout(color_conv7_1, dropout_keep_prob, 'drop7')

    # Block 3
    upsample81 = layers.deconv_upsample(color_drop7, 4, 'upsample8')
    upsample81 = Cropping1D(cropping=((0, 1)))(upsample81)
    concat8 = layers.concat(upsample81, color_conv2_2, 'concat8')
    color_conv8_1 = layers.conv_btn1(concat8,
                                     3,
                                     32,
                                     'conv8_1',
                                     is_training=is_training)
    # color_conv8_2 = layers.conv_btn1(color_conv8_1, 3, 32, 'conv8_1', is_training = is_training)

    # Block 4
    upsample91 = layers.deconv_upsample(color_conv8_1, 4, 'upsample9')
    upsample91 = Cropping1D(cropping=((1, 2)))(upsample91)
    concat9 = layers.concat(upsample91, color_conv1_2, 'concat9')
    color_conv9_1 = layers.conv_btn1(concat9,
                                     3,
                                     32,
                                     'conv9_1',
                                     is_training=is_training)
    # color_conv9_2 = layers.conv_btn1(color_conv9_1, 3, 32,   'conv9_1', is_training = is_training)

    # Block 5
    score = layers.conv(color_conv9_1,
                        1,
                        num_classes,
                        'score',
                        activation_fn=None)
    logits = tf.reshape(score, (-1, num_classes))
    return logits
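The Cropping1D calls above re-align sequence lengths after each x4 upsample when the length is not an exact power-of-4 multiple. The bookkeeping sketch below reproduces the crop amounts, assuming 'SAME'-padded pooling (each maxpool of size 4 yields ceil(length / 4)) and an exact x4 deconv upsample; the input length of 3001 is one value consistent with these crops, chosen for illustration and not taken from the original code.

# Length bookkeeping that reproduces the crops (0, 1), none, (0, 1), (1, 2).
import math

length = 3001                        # illustrative input length
lengths = [length]                   # conv1_2, conv2_2, conv3_2, conv4_2, bridge
for _ in range(4):                   # pool_1 .. pool_4, each with size 4
    lengths.append(math.ceil(lengths[-1] / 4))
# lengths == [3001, 751, 188, 47, 12]

for up_in, skip in zip(lengths[:0:-1], lengths[-2::-1]):
    upsampled = 4 * up_in
    print(f"upsampled to {upsampled}, crop {upsampled - skip} to match skip {skip}")
# upsampled to 48, crop 1 to match skip 47      -> Cropping1D((0, 1)) before concat6
# upsampled to 188, crop 0 to match skip 188    -> no crop before concat7
# upsampled to 752, crop 1 to match skip 751    -> Cropping1D((0, 1)) before concat8
# upsampled to 3004, crop 3 to match skip 3001  -> Cropping1D((1, 2)) before concat9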
Example #8
def get_model(X, batch_size, image_dimension):

    input_shape = (batch_size, 3, image_dimension[0], image_dimension[1])
    all_parameters = []
    acc_parameters = []

    #############################################
    # a first convolution with 64 (3, 3) filters
    output, output_test, params, output_shape = convolutional(
        X, X, input_shape, 64, (3, 3))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # a second convolution with 128 (3, 3) filters
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 128, (3, 3))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # 2 convolutional layers with 256 (3, 3) filters
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 256, (3, 3))
    all_parameters += params
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 256, (3, 3))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # Fully connected
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 1024, (1, 1))
    all_parameters += params
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 1024, (1, 1))
    all_parameters += params

    # maxpool with size=(4, 4) and fully connected
    output, output_test, params, output_shape = avgpool(
        output, output_test, output_shape, (4, 4))
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 10, (1, 1))
    all_parameters += params

    output, output_test, params, output_shape, cacc_parameters = batch_norm(
        output, output_test, output_shape)
    acc_parameters += cacc_parameters
    all_parameters += params

    # softmax
    output = multi_dim_softmax(output)
    output_test = multi_dim_softmax(output_test)

    #
    return output, output_test, all_parameters, acc_parameters
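The 10-way head and the final (4, 4) average pool suggest a 32x32 input (e.g. CIFAR-10): three 2x2 maxpools reduce 32 to 4, which the average pool then collapses to 1x1. The trace below assumes the convolutional helper preserves spatial size ('same'-style padding); it is illustrative, not taken from the original code.

# Illustrative shape trace for a 32x32 input, batch size N:
#   input                  (N,    3, 32, 32)
#   conv 64  + maxpool 2x2 (N,   64, 16, 16)
#   conv 128 + maxpool 2x2 (N,  128,  8,  8)
#   conv 256 x2 + maxpool  (N,  256,  4,  4)
#   1x1 convs to 1024      (N, 1024,  4,  4)
#   avgpool (4, 4)         (N, 1024,  1,  1)
#   1x1 conv to 10 classes (N,   10,  1,  1) -> batch_norm -> softmax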