def inception_block_3c(self, x, ct):
        """Inception-3c sub-branch: 1x1 "reduce" conv then 3x3 conv (each
        followed by BN + ReLU), finally reshaped from per-frame 2-D maps
        into a 5-D clip tensor.

        Args:
            x: input feature-map batch, channels-first per ``self.DATA_FORMAT``.
            ct: passed as ``trainable`` to every Conv/BN layer created here.

        Returns:
            Tensor of shape (batch, 96, self.frm_num, 28, 28): the frame axis
            is unfolded out of the batch dimension and moved behind channels.
        """
        # Layers are created on each call and kept as attributes.
        self.inception_3c_double_3x3_reduce = Conv2D(
            kernel_size=1,
            filters=64,
            padding='valid',
            data_format=self.DATA_FORMAT,
            trainable=ct,
            name='inception_3c_double_3x3_reduce')
        # axis=1: normalize over the channel axis (channels-first layout).
        self.inception_3c_double_3x3_reduce_bn = BatchNormalization(
            axis=1, trainable=ct, name='inception_3c_double_3x3_reduce_bn')
        self.inception_3c_double_3x3_1 = Conv2D(
            kernel_size=3,
            filters=96,
            padding='same',
            data_format=self.DATA_FORMAT,
            trainable=ct,
            name='inception_3c_double_3x3_1')
        self.inception_3c_double_3x3_1_bn = BatchNormalization(
            axis=1, trainable=ct, name='inception_3c_double_3x3_1_bn')

        x = self.inception_3c_double_3x3_reduce(x)
        x = self.inception_3c_double_3x3_reduce_bn(x)
        x = tf.nn.relu(x, name='inception_3c_relu_double_3x3_reduce_inp')

        x = self.inception_3c_double_3x3_1(x)
        x = self.inception_3c_double_3x3_1_bn(x)
        print(x)  # debug print left in by the author; shows the tensor spec
        x = tf.nn.relu(x, name='inception_3c_relu_double_3x3_1_inp')
        # Unfold frames from the batch: (-1, frm_num, 96, 28, 28), then move
        # channels ahead of frames -> (batch, 96, frm_num, 28, 28).
        x = tf.reshape(x, (-1, self.frm_num, 96, 28, 28), name='r2Dto3D')
        x = tf.transpose(x, [0, 2, 1, 3, 4])
        return x
Exemplo n.º 2
0
def g_block(x, filters, is_training, name):
    """BigGAN-style generator residual block (see the BigGAN paper; based
    on self-attention GAN code).

    Both paths upsample by 2x and map to `filters` channels: the shortcut
    via a 1x1 conv, the main path via BN-ReLU and two 3x3 convs. The block
    returns their element-wise sum.
    """
    with tf.variable_scope(name):
        # Shortcut path: upsample, then project channels with a 1x1 conv.
        shortcut = upsample(x, 2)
        shortcut = Conv2D(filters=filters,
                          kernel_size=1,
                          padding='SAME',
                          name='conv3')(shortcut)

        # Main path: pre-activation residual (BN -> ReLU -> conv, twice).
        h = tf.nn.relu(batch_norm(x, is_training, 'bn0'))
        h = upsample(h, 2)
        h = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='SAME',
                   name='conv1')(h)
        h = tf.nn.relu(batch_norm(h, is_training, 'bn1'))
        h = Conv2D(filters=filters,
                   kernel_size=3,
                   padding='SAME',
                   name='conv2')(h)

        return shortcut + h
Exemplo n.º 3
0
    def _build(self):
        """Build the actor-critic graph: shared conv/dense trunk, a softmax
        policy head and a scalar value head, plus the A2C-style combined
        loss and its Adam train op.
        """
        # Placeholders: states, one-hot actions taken, and returns/rewards.
        self.s_input = tf.placeholder(tf.float32, [None] + list(self.state_shape), name='States')
        self.a_input = tf.placeholder(tf.float32, [None, self.n_actions], name='Actions')
        self.r_input = tf.placeholder(tf.float32, [None], name='Rewards')

        conv1 = Conv2D(32, 8, strides=(6, 6),
                       activation=tf.nn.relu,
                       kernel_initializer=tf.initializers.glorot_normal(),
                       bias_initializer=tf.initializers.glorot_normal(),
                       name='conv1')(self.s_input)
        conv2 = Conv2D(48, 6, strides=(3, 3),
                       activation=tf.nn.relu,
                       kernel_initializer=tf.initializers.glorot_normal(),
                       bias_initializer=tf.initializers.glorot_normal(),
                       name='conv2')(conv1)
        f = Flatten()(conv2)
        dense1 = Dense(512,
                       activation=tf.nn.relu,
                       kernel_initializer=tf.initializers.glorot_normal(),
                       bias_initializer=tf.initializers.glorot_normal(),
                       name='dense1')(f)
        # Policy head: softmax over the n_actions logits.
        dense_pi = Dense(128,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.initializers.glorot_normal(),
                         bias_initializer=tf.initializers.glorot_normal(),
                         name='dense_pi')(dense1)
        self.pi = Dense(self.n_actions,
                        activation=tf.nn.softmax,
                        kernel_initializer=tf.initializers.glorot_normal(),
                        bias_initializer=tf.initializers.glorot_normal(),
                        name='Pi')(dense_pi)
        # Clip probabilities away from 0 so the logs below stay finite.
        self.pi = tf.clip_by_value(self.pi, 1e-8, 1.)
        # Value head: scalar state value, squeezed to shape [batch].
        dense_v = Dense(32,
                        activation=tf.nn.relu,
                        kernel_initializer=tf.initializers.glorot_normal(),
                        bias_initializer=tf.initializers.glorot_normal(),
                        name='dense_v')(dense1)
        self.v = Dense(1,
                       activation=None,
                       kernel_initializer=tf.initializers.glorot_normal(),
                       name='V')(dense_v)
        self.v = tf.squeeze(self.v, axis=1)

        # log pi(a|s) for the taken action (a_input is one-hot).
        log_policy = tf.log(tf.reduce_sum(self.pi * self.a_input, axis=1) + 1e-10)
        self.advantage = self.r_input - self.v

        # Policy gradient term; advantage is treated as a constant here.
        loss_pi = -tf.multiply(log_policy, tf.stop_gradient(self.advantage))
        loss_v = 0.5 * tf.square(self.advantage)
        # Entropy bonus (coefficient 0.05) encourages exploration.
        entropy_pi = - 0.05 * tf.reduce_sum(self.pi * tf.log(self.pi), axis=1)
        self.loss = tf.reduce_mean(loss_pi + loss_v - entropy_pi)
        opt = tf.train.AdamOptimizer(self.lr)
        self.train_op = opt.minimize(self.loss)
    def inception_part(self, input_x, ct):
        """Network stem: 7x7 conv -> pool -> 1x1 reduce -> 3x3 conv -> pool,
        then inception blocks 3a, 3b and 3c.

        Args:
            input_x: input image batch, channels-first per ``self.DATA_FORMAT``.
            ct: passed as ``trainable`` to every Conv/BN layer created here.

        Returns:
            Output of ``inception_block_3c`` (also cached as ``self.incep_3c``).
        """
        self.conv1_7x7_s2 = Conv2D(kernel_size=(7,7), filters=64, strides=2, padding='same',
                            data_format=self.DATA_FORMAT, trainable=ct, name='conv1_7x7_s2')

        # axis=1 on every BN: normalize the channel axis (channels-first).
        self.conv1_7x7_s2_bn = BatchNormalization(axis=1, trainable=ct, name='conv1_7x7_s2_bn')

        self.pool1_3x3_s2 = MaxPooling2D(pool_size=3, strides=2, padding='same',
                                  data_format=self.DATA_FORMAT, name='pool1_3x3_s2')

        self.conv2_3x3_reduce = Conv2D(kernel_size=1, filters=64, trainable=ct,
                                       data_format=self.DATA_FORMAT, name='conv2_3x3_reduce')

        self.conv2_3x3_reduce_bn = BatchNormalization(axis=1, trainable=ct, name='conv2_3x3_reduce_bn')

        self.conv2_3x3 = Conv2D(kernel_size=3, filters=192, padding='same',
                                data_format=self.DATA_FORMAT, trainable=ct, name='conv2_3x3')

        self.conv2_3x3_bn = BatchNormalization(axis=1, trainable=ct, name='conv2_3x3_bn')

        self.pool2_3x3_s2 = MaxPooling2D(pool_size=3, strides=2, padding='same',
                                         data_format=self.DATA_FORMAT, name='pool2_3x3_s2')

        # x = tf.reshape(input_x, (-1, 3, 224, 224))
        x = self.conv1_7x7_s2(input_x)

        x = self.conv1_7x7_s2_bn(x)
        x = tf.nn.relu(x, name='conv1_relu_7x7_inp')
        x = self.pool1_3x3_s2(x)


        x = self.conv2_3x3_reduce(x)
        x = self.conv2_3x3_reduce_bn(x)
        x = tf.nn.relu(x, name='conv2_relu_3x3_reduce_inp')

        x = self.conv2_3x3(x)
        x = self.conv2_3x3_bn(x)
        x = tf.nn.relu(x, name='conv2_relu_3x3_inp')

        x = self.pool2_3x3_s2(x)
        print(x)  # debug print left in by the author

        # Chain the three inception blocks; each prints its output spec.
        self.incep_3a = self.inception_block_3a(x, ct)
        print(self.incep_3a)
        self.incep_3b = self.inception_block_3b(self.incep_3a, ct)
        print(self.incep_3b)
        self.incep_3c = self.inception_block_3c(self.incep_3b, ct)
        print(self.incep_3c)
        return self.incep_3c
def conv_block_simple_2d(prevlayer,
                         num_filters,
                         prefix,
                         kernel_size=(3, 3),
                         initializer="he_normal",
                         strides=(1, 1)):
    """Conv2D -> BatchNorm -> ReLU building block (channels-first).

    Args:
        prevlayer: input tensor.
        num_filters: number of convolution filters.
        prefix: name prefix for the conv / bn / activation ops.
        kernel_size: convolution kernel size.
        initializer: kernel initializer for the convolution.
        strides: convolution strides.

    Returns:
        The ReLU-activated output tensor.
    """
    conv = Conv2D(filters=num_filters,
                  kernel_size=kernel_size,
                  padding="same",
                  kernel_initializer=initializer,
                  strides=strides,
                  name=prefix + "_conv",
                  data_format='channels_first')(prevlayer)
    # axis=1: normalize over the channel axis (channels_first layout).
    conv = BatchNormalization(name=prefix + "_bn", axis=1)(conv)
    conv = tf.nn.relu(conv, name=prefix + "_activation")
    return conv
Exemplo n.º 6
0
def conv(inputs,
         out_filters,
         ksize=(3, 3),
         strides=(1, 1),
         dilation=(1, 1),
         use_bias=True):
    """Apply a 2-D convolution with 'same' padding and Glorot-uniform kernels.

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer or tuple of two integers
    strides: Strides for moving kernel. One integer or tuple of two integers
    dilation: Dilation of kernel. One integer or tuple of two integers
    use_bias: Whether to use bias
    """
    layer = Conv2D(filters=out_filters,
                   kernel_size=ksize,
                   strides=strides,
                   padding='same',
                   dilation_rate=dilation,
                   use_bias=use_bias,
                   kernel_initializer=init_ops.glorot_uniform_initializer)
    return layer(inputs)
Exemplo n.º 7
0
def shortcut(input, residual):
    """Residual (shortcut) connection.

    Adds `input` to `residual`. Deeper layers usually shrink spatially and
    widen in channels, so when the two shapes differ (e.g. input
    (N, 40, 40, 64) vs residual (N, 20, 20, 128)) the input is first
    projected with a strided 1x1 convolution to make the tensors addable.
    """
    in_shape = input.shape
    res_shape = residual.shape

    # Stride factors that downsample the input to the residual's spatial size.
    stride_h = int(in_shape[1] / res_shape[1])
    stride_w = int(in_shape[2] / res_shape[2])
    same_channels = in_shape[3] == res_shape[3]

    if stride_h > 1 or stride_w > 1 or not same_channels:
        # Shape mismatch: project with an L1-regularized strided 1x1 conv.
        projected = Conv2D(
            res_shape[3],
            kernel_size = (1,1),
            strides = (stride_h, stride_w),
            padding = "VALID",
            kernel_regularizer = tf.contrib.layers.l1_regularizer(0.001))(input)
    else:
        projected = input

    # Element-wise sum of two equally shaped tensors.
    return projected + residual
def simple_cnn(input_shape):
    """Build a small 3-block CNN Model ending in a 2-channel sigmoid conv.

    Args:
        input_shape: shape (without the batch axis) of the model input.
            NOTE: the input tensor is the subsampled k-space, i.e. a
            complex128 tensor of shape (batch, channels, height, width).

    Returns:
        A Keras ``Model`` mapping the input to the sigmoid prediction map.
    """
    img_input = Input(input_shape)

    # Three identical conv-bn-relu blocks with 16 filters each.
    conv1 = conv_block_simple_2d(prevlayer=img_input,
                                 num_filters=16,
                                 prefix="conv1")
    conv2 = conv_block_simple_2d(prevlayer=conv1,
                                 num_filters=16,
                                 prefix="conv1_2")
    conv3 = conv_block_simple_2d(prevlayer=conv2,
                                 num_filters=16,
                                 prefix="conv1_3")
    # 1x1 conv head; 2 channels (e.g. real/imaginary parts — confirm with caller).
    prediction = Conv2D(
        filters=2,
        kernel_size=(1, 1),
        activation="sigmoid",
        name="prediction",
        data_format="channels_first",
    )(conv3)
    the_model = Model(inputs=img_input, outputs=prediction)

    return the_model
Exemplo n.º 9
0
def d_block(x, filters, name):
    """BigGAN-style discriminator residual block.

    Main path: ReLU -> 3x3 conv -> ReLU -> 3x3 conv -> 2x average pool.
    Shortcut path: 3x3 conv -> 2x average pool. Returns the sum of both.
    """
    with tf.variable_scope(name):
        x0 = x
        x = tf.nn.relu(x)
        x = Conv2D(filters, kernel_size=3, padding='SAME', name='conv1')(x)
        x = tf.nn.relu(x)
        x = Conv2D(filters, kernel_size=3, padding='SAME', name='conv2')(x)
        x = AveragePooling2D(pool_size=2,
                             strides=2,
                             padding='VALID',
                             name='avg1')(x)

        x0 = Conv2D(filters, kernel_size=3, padding='SAME', name='conv3')(x0)
        # Bug fix: this layer was also named 'avg1', colliding with the
        # main-path pooling layer in the same scope (cf. the distinct
        # conv1/conv2/conv3 names above).
        x0 = AveragePooling2D(pool_size=2,
                              strides=2,
                              padding='VALID',
                              name='avg2')(x0)

        return x0 + x
    def inception_block_3a(self, x, ct):
        """Inception 3a block: four parallel branches concatenated on the
        channel axis.

        Branches: (1) 1x1 conv; (2) 1x1 reduce -> 3x3 conv; (3) 1x1 reduce
        -> two 3x3 convs; (4) 3x3 max-pool -> 1x1 projection. Every conv is
        followed by BN (axis=1, channels-first) and ReLU.

        Args:
            x: input feature map, channels-first per ``self.DATA_FORMAT``.
            ct: passed as ``trainable`` to every Conv/BN layer created here.

        Returns:
            Channel-wise concatenation of the four branch outputs.
        """
        self.inception_3a_1x1 = Conv2D(kernel_size=1, filters=64, data_format=self.DATA_FORMAT, trainable=ct,
                                       name='inception_3a_1x1')
        self.inception_3a_1x1_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_1x1_bn')
        self.inception_3a_3x3_reduce = Conv2D(kernel_size=1, filters=64, data_format=self.DATA_FORMAT, trainable=ct,
                                              name='inception_3a_3x3_reduce')
        self.inception_3a_3x3_reduce_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_3x3_reduce_bn')
        self.inception_3a_3x3 = Conv2D(kernel_size=3, filters=64, padding='same', data_format=self.DATA_FORMAT,
                                       trainable=ct, name='inception_3a_3x3')
        self.inception_3a_3x3_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_3x3_bn')
        self.inception_3a_double_3x3_reduce = Conv2D(kernel_size=1, filters=64, data_format=self.DATA_FORMAT,
                                                     trainable=ct, name='inception_3a_double_3x3_reduce')
        self.inception_3a_double_3x3_reduce_bn = BatchNormalization(axis=1, trainable=ct,
                                                                    name='inception_3a_double_3x3_reduce_bn')
        self.inception_3a_double_3x3_1 = Conv2D(kernel_size=3, filters=96, padding='same',
                                                data_format=self.DATA_FORMAT, trainable=ct,
                                                name='inception_3a_double_3x3_1')
        self.inception_3a_double_3x3_1_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_double_3x3_1_bn')
        self.inception_3a_double_3x3_2 = Conv2D(kernel_size=3, filters=96, padding='same',
                                                data_format=self.DATA_FORMAT, trainable=ct,
                                                name='inception_3a_double_3x3_2')
        self.inception_3a_double_3x3_2_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_double_3x3_2_bn')
        # strides=1 + 'same' padding: pooling keeps the spatial size so the
        # branch output can be concatenated with the conv branches.
        self.inception_3a_pool = MaxPooling2D(pool_size=3, strides=1, padding='same', data_format=self.DATA_FORMAT,
                                              name='inception_3a_pool')
        self.inception_3a_pool_proj = Conv2D(kernel_size=1, filters=32, data_format=self.DATA_FORMAT, trainable=ct,
                                             name='inception_3a_pool_proj')
        self.inception_3a_pool_proj_bn = BatchNormalization(axis=1, trainable=ct, name='inception_3a_pool_proj_bn')

        # Branch 1: plain 1x1 conv.
        x1 = self.inception_3a_1x1(x)
        x1 = self.inception_3a_1x1_bn(x1)
        x1 = tf.nn.relu(x1)

        # Branch 2: 1x1 reduce then 3x3 conv.
        x2 = self.inception_3a_3x3_reduce(x)
        x2 = self.inception_3a_3x3_reduce_bn(x2)
        x2 = tf.nn.relu(x2)
        x2 = self.inception_3a_3x3(x2)
        x2 = self.inception_3a_3x3_bn(x2)
        x2 = tf.nn.relu(x2)

        # Branch 3: 1x1 reduce then a double 3x3 conv stack.
        x3 = self.inception_3a_double_3x3_reduce(x)
        x3 = self.inception_3a_double_3x3_reduce_bn(x3)
        x3 = tf.nn.relu(x3)
        x3 = self.inception_3a_double_3x3_1(x3)
        x3 = self.inception_3a_double_3x3_1_bn(x3)
        x3 = tf.nn.relu(x3)
        x3 = self.inception_3a_double_3x3_2(x3)
        x3 = self.inception_3a_double_3x3_2_bn(x3)
        x3 = tf.nn.relu(x3)

        # Branch 4: max-pool then 1x1 projection.
        x4 = self.inception_3a_pool(x)
        x4 = self.inception_3a_pool_proj(x4)
        x4 = self.inception_3a_pool_proj_bn(x4)
        x4 = tf.nn.relu(x4)

        # axis=1: concatenate along channels (channels-first layout).
        x = tf.concat([x1, x2, x3, x4], axis=1, name='inception_3a_output')
        return x
Exemplo n.º 11
0
def Network(features, labels, mode):
    """U-Net-style Estimator model_fn.

    Encoder: four conv_res_conv + 2x max-pool stages (64..512 filters);
    bridge: conv_res_conv with 1024 filters; decoder: four deconv stages
    with skip connections back to the encoder, ending in a 1x1 conv that
    produces a single-channel output.

    Args:
        features: dict whose "x" entry holds a flat 640x640x1 image batch.
        labels: ground-truth tensor (ignored in PREDICT mode).
        mode: one of the Estimator ModeKeys.

    Returns:
        An EstimatorSpec for the requested mode.
    """
    # input shape : 640 x 640 x 1
    input_layer = tf.reshape(features["x"], [-1, 640, 640, 1])

    # encoder
    down1 = MaxPooling2D([2, 2], 2)(conv_res_conv(input_layer, 64))
    down2 = MaxPooling2D([2, 2], 2)(conv_res_conv(down1, 128))
    down3 = MaxPooling2D([2, 2], 2)(conv_res_conv(down2, 256))
    down4 = MaxPooling2D([2, 2], 2)(conv_res_conv(down3, 512))

    # bridge
    bridge = conv_res_conv(down4, 1024)

    # decoder (mirror of the encoder, with skip connections)
    deconv4 = deconv2d_with_bn_and_act(bridge, down4.get_shape())
    merge4 = skip_connection(deconv4, down4)
    upscale4 = conv_res_conv(merge4, 512)

    deconv3 = deconv2d_with_bn_and_act(upscale4, down3.get_shape())
    merge3 = skip_connection(deconv3, down3)
    upscale3 = conv_res_conv(merge3, 256)

    deconv2 = deconv2d_with_bn_and_act(upscale3, down2.get_shape())
    merge2 = skip_connection(deconv2, down2)
    upscale2 = conv_res_conv(merge2, 128)

    deconv1 = deconv2d_with_bn_and_act(upscale2, down1.get_shape())
    merge1 = skip_connection(deconv1, down1)
    upscale1 = conv_res_conv(merge1, 64)

    output = Conv2D(1, kernel_size=[1, 1], padding="same")(upscale1)

    predictions = {"outputs": output}

    if mode == ModeKeys.PREDICT:
        return EstimatorSpec(mode=mode, predictions=predictions)

    # Bug fix: the loss was previously fed the `predictions` dict instead of
    # the output tensor, which tf.losses cannot consume.
    loss = tf.losses.mean_pairwise_squared_error(labels, predictions["outputs"])

    if mode == ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metric_ops = {
        "acc": tf.metrics.accuracy(labels=labels, predictions=predictions["outputs"])
    }

    return EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
Exemplo n.º 12
0
def conv2d_bn(x, nb_filter, kernel_size, strides=(1, 1), padding="SAME"):
    """Convolution, then batch normalization, then a ReLU activation."""
    # Build the L1-regularized conv layer first, then apply the whole
    # conv -> BN -> ReLU pipeline in one expression.
    conv_layer = Conv2D(
        nb_filter,
        kernel_size,
        strides,
        padding,
        kernel_regularizer=tf.contrib.layers.l1_regularizer(0.001))
    return relu(BatchNormalization()(conv_layer(x)))
Exemplo n.º 13
0
    def __init__(self):
        """Declare the encoder's layers: three conv stages, each with two
        ReLU convs, a max-pool, and a "compress" side branch (average pool
        -> flatten -> 128-unit linear projection) plus BN and dropout.
        Layers are only constructed here; wiring happens in the call path.
        """
        super(Encoder, self).__init__()
        # Shared kwargs for every conv below.
        arg = {'activation': tf.nn.relu, 'padding': 'same'}
        # ---- Stage 1: two 7x7/stride-2 convs, 64 filters ----
        self.conv_11 = Conv2D(name='e_conv_11',
                              filters=64,
                              kernel_size=7,
                              strides=(2, 2),
                              **arg)
        self.conv_12 = Conv2D(name='e_conv_12',
                              filters=64,
                              kernel_size=7,
                              strides=(2, 2),
                              **arg)
        self.pool_1 = MaxPooling2D(name='e_pool_1',
                                   pool_size=4,
                                   strides=(2, 2),
                                   padding='same')
        # Side branch: compress stage-1 features to a 128-d vector.
        self.compress_11 = AveragePooling2D(name='e_comp_11',
                                            pool_size=5,
                                            strides=(3, 3),
                                            padding='same')
        self.compress_12 = Flatten()
        self.compress_13 = Dense(name='e_comp_13',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_1 = BatchNormalization(name='e_bn_1')
        self.drop_1 = Dropout(name='e_drop_1', rate=0.5)

        # ---- Stage 2: two 5x5 convs, 128 filters ----
        self.conv_21 = Conv2D(name='e_conv_21',
                              filters=128,
                              kernel_size=5,
                              strides=(1, 1),
                              **arg)
        self.conv_22 = Conv2D(name='e_conv_22',
                              filters=128,
                              kernel_size=5,
                              strides=(1, 1),
                              **arg)
        self.pool_2 = MaxPooling2D(name='e_pool_2',
                                   pool_size=4,
                                   strides=(2, 2),
                                   padding='same')
        self.compress_21 = AveragePooling2D(name='e_comp_21',
                                            pool_size=5,
                                            strides=(3, 3),
                                            padding='same')
        self.compress_22 = Flatten()
        self.compress_23 = Dense(name='e_comp_23',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_2 = BatchNormalization(name='e_bn_2')
        self.drop_2 = Dropout(name='e_drop_2', rate=0.5)

        # ---- Stage 3: two 3x3 convs, 256 filters ----
        self.conv_31 = Conv2D(name='e_conv_31',
                              filters=256,
                              kernel_size=3,
                              strides=(1, 1),
                              **arg)
        self.conv_32 = Conv2D(name='e_conv_32',
                              filters=256,
                              kernel_size=3,
                              strides=(1, 1),
                              **arg)
        self.pool_3 = MaxPooling2D(name='e_pool_3',
                                   pool_size=2,
                                   strides=(2, 2),
                                   padding='same')
        self.compress_31 = AveragePooling2D(name='e_comp_31',
                                            pool_size=3,
                                            strides=(1, 1),
                                            padding='same')
        self.compress_32 = Flatten()
        self.compress_33 = Dense(name='e_comp_33',
                                 units=128,
                                 activation=None,
                                 use_bias=False)
        #  activity_regularizer=tf.keras.regularizers.l2(l=0.01))
        self.batch_norm_3 = BatchNormalization(name='e_bn_3')
        self.drop_3 = Dropout(name='e_drop_3', rate=0.5)
def unet_7_layers(input_tensor):
    """7-layer (3 down / 3 up) 2-D U-Net, channels-first.

    Encoder: three conv-conv-pool stages with 32/64/128 filters, then a
    triple-conv 256-filter bottleneck. Decoder: three Conv2DTranspose
    upsampling stages, each concatenated with the matching encoder stage
    on the channel axis. Ends with a 1x1 sigmoid conv producing a
    single-channel prediction map.

    Args:
        input_tensor: input image tensor, channels-first layout.

    Returns:
        The single-channel sigmoid prediction tensor.
    """
    mp_param = (2, 2)      # max-pool window
    stride_param = (2, 2)  # pooling stride (halves H and W each stage)
    d_format = "channels_first"
    kern = (3, 3)

    # Filter counts per encoder/decoder stage.
    filt = (32, 64, 128, 256)

    conv1 = conv_block_simple_2d(prevlayer=input_tensor,
                                 num_filters=filt[0],
                                 prefix="conv1",
                                 kernel_size=kern)
    conv1 = conv_block_simple_2d(prevlayer=conv1,
                                 num_filters=filt[0],
                                 prefix="conv1_1",
                                 kernel_size=kern)
    pool1 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool1")(conv1)

    conv2 = conv_block_simple_2d(prevlayer=pool1,
                                 num_filters=filt[1],
                                 prefix="conv2",
                                 kernel_size=kern)
    conv2 = conv_block_simple_2d(prevlayer=conv2,
                                 num_filters=filt[1],
                                 prefix="conv2_1",
                                 kernel_size=kern)
    pool2 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool2")(conv2)

    conv3 = conv_block_simple_2d(prevlayer=pool2,
                                 num_filters=filt[2],
                                 prefix="conv3",
                                 kernel_size=kern)
    conv3 = conv_block_simple_2d(prevlayer=conv3,
                                 num_filters=filt[2],
                                 prefix="conv3_1",
                                 kernel_size=kern)
    pool3 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool3")(conv3)

    # Bottleneck: three convs at the deepest resolution.
    conv4 = conv_block_simple_2d(prevlayer=pool3,
                                 num_filters=filt[3],
                                 prefix="conv_4",
                                 kernel_size=kern)
    conv4 = conv_block_simple_2d(prevlayer=conv4,
                                 num_filters=filt[3],
                                 prefix="conv_4_1",
                                 kernel_size=kern)
    conv4 = conv_block_simple_2d(prevlayer=conv4,
                                 num_filters=filt[3],
                                 prefix="conv_4_2",
                                 kernel_size=kern)

    # Decoder: upsample, concatenate skip connection (axis=1 = channels).
    up5 = Conv2DTranspose(filters=filt[2],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv4)
    up5 = concatenate([up5, conv3], axis=1)
    conv5 = conv_block_simple_2d(prevlayer=up5,
                                 num_filters=filt[2],
                                 prefix="conv5_1")
    conv5 = conv_block_simple_2d(prevlayer=conv5,
                                 num_filters=filt[2],
                                 prefix="conv5_2")

    up6 = Conv2DTranspose(filters=filt[1],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv5)
    up6 = concatenate([up6, conv2], axis=1)
    conv6 = conv_block_simple_2d(prevlayer=up6,
                                 num_filters=filt[1],
                                 prefix="conv6_1")
    conv6 = conv_block_simple_2d(prevlayer=conv6,
                                 num_filters=filt[1],
                                 prefix="conv6_2")

    up7 = Conv2DTranspose(filters=filt[0],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv6)
    up7 = concatenate([up7, conv1], axis=1)
    conv7 = conv_block_simple_2d(prevlayer=up7,
                                 num_filters=filt[0],
                                 prefix="conv7_1")
    conv7 = conv_block_simple_2d(prevlayer=conv7,
                                 num_filters=filt[0],
                                 prefix="conv7_2")

    # 1x1 sigmoid head.
    prediction = Conv2D(filters=1,
                        kernel_size=(1, 1),
                        activation="sigmoid",
                        name="prediction",
                        data_format=d_format)(conv7)

    return prediction
def unet_9_layers(input_tensor, output_tensor_channels=1):
    """9-layer (4 down / 4 up) 2-D U-Net, channels-first.

    Encoder: four conv-conv-pool stages with 32/64/128/256 filters, then a
    512-filter bottleneck. Decoder: four Conv2DTranspose upsampling stages
    with encoder skip connections (concatenated on the channel axis). Note
    the second conv of the bottleneck and of each decoder stage already
    narrows to the next (shallower) stage's filter count. Ends in a 1x1
    sigmoid conv.

    Args:
        input_tensor: input tensor, channels-first layout.
        output_tensor_channels: channel count of the prediction map.

    Returns:
        The sigmoid-activated prediction tensor.
    """
    mp_param = (2, 2)      # max-pool window
    stride_param = (2, 2)  # pooling stride (halves H and W each stage)
    d_format = "channels_first"
    kern = (3, 3)

    # Filter counts per stage.
    filt = (32, 64, 128, 256, 512)

    conv1 = conv_block_simple_2d(prevlayer=input_tensor,
                                 num_filters=filt[0],
                                 prefix="conv1")
    conv1 = conv_block_simple_2d(prevlayer=conv1,
                                 num_filters=filt[0],
                                 prefix="conv1_1")
    pool1 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool1")(conv1)

    conv2 = conv_block_simple_2d(prevlayer=pool1,
                                 num_filters=filt[1],
                                 prefix="conv2")
    conv2 = conv_block_simple_2d(prevlayer=conv2,
                                 num_filters=filt[1],
                                 prefix="conv2_1")
    pool2 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool2")(conv2)

    conv3 = conv_block_simple_2d(prevlayer=pool2,
                                 num_filters=filt[2],
                                 prefix="conv3")
    conv3 = conv_block_simple_2d(prevlayer=conv3,
                                 num_filters=filt[2],
                                 prefix="conv3_1")
    pool3 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool3")(conv3)

    conv4 = conv_block_simple_2d(prevlayer=pool3,
                                 num_filters=filt[3],
                                 prefix="conv4")
    conv4 = conv_block_simple_2d(prevlayer=conv4,
                                 num_filters=filt[3],
                                 prefix="conv4_1")
    pool4 = MaxPooling2D(pool_size=mp_param,
                         strides=stride_param,
                         padding="same",
                         data_format="channels_first",
                         name="pool4")(conv4)

    # Bottleneck: widens to 512 then narrows back to 256 filters.
    conv5 = conv_block_simple_2d(prevlayer=pool4,
                                 num_filters=filt[4],
                                 prefix="conv_5")
    conv5 = conv_block_simple_2d(prevlayer=conv5,
                                 num_filters=filt[3],
                                 prefix="conv_5_1")

    # Decoder: upsample, concatenate skip connection (axis=1 = channels).
    up6 = Conv2DTranspose(filters=filt[3],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv5)
    up6 = concatenate([up6, conv4], axis=1)
    conv6 = conv_block_simple_2d(prevlayer=up6,
                                 num_filters=filt[3],
                                 prefix="conv6_1")
    conv6 = conv_block_simple_2d(prevlayer=conv6,
                                 num_filters=filt[2],
                                 prefix="conv6_2")

    up7 = Conv2DTranspose(filters=filt[2],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv6)
    up7 = concatenate([up7, conv3], axis=1)
    conv7 = conv_block_simple_2d(prevlayer=up7,
                                 num_filters=filt[2],
                                 prefix="conv7_1")
    conv7 = conv_block_simple_2d(prevlayer=conv7,
                                 num_filters=filt[1],
                                 prefix="conv7_2")

    up8 = Conv2DTranspose(filters=filt[1],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv7)
    up8 = concatenate([up8, conv2], axis=1)
    conv8 = conv_block_simple_2d(prevlayer=up8,
                                 num_filters=filt[1],
                                 prefix="conv8_1")
    conv8 = conv_block_simple_2d(prevlayer=conv8,
                                 num_filters=filt[0],
                                 prefix="conv8_2")

    up9 = Conv2DTranspose(filters=filt[0],
                          kernel_size=kern,
                          strides=(2, 2),
                          padding="same",
                          data_format="channels_first")(conv8)
    up9 = concatenate([up9, conv1], axis=1)
    conv9 = conv_block_simple_2d(prevlayer=up9,
                                 num_filters=filt[0],
                                 prefix="conv9_1")
    conv9 = conv_block_simple_2d(prevlayer=conv9,
                                 num_filters=filt[0],
                                 prefix="conv9_2")

    # 1x1 sigmoid head with a configurable number of output channels.
    prediction = Conv2D(filters=output_tensor_channels,
                        kernel_size=(1, 1),
                        activation="sigmoid",
                        use_bias=True,
                        name="prediction",
                        data_format=d_format)(conv9)

    return prediction
Exemplo n.º 16
0
    def __init__(self):
        """Declare the discriminator's layers: three conv stages (three ReLU
        convs + max-pool + dropout each, 64/128/256 filters), then a
        flatten + three-Dense classifier head ending in 2 raw logits.
        Layers are only constructed here; wiring happens in the call path.
        """
        super(Discriminator, self).__init__()
        # Shared kwargs for every conv below.
        arg = {'activation': tf.nn.relu, 'padding': 'same'}
        # ---- Stage 1: 64 filters (first conv downsamples with stride 2) ----
        self.conv_11 = Conv2D(name='di_conv_11',
                              filters=64,
                              kernel_size=(5, 5),
                              strides=(2, 2),
                              **arg)
        self.conv_12 = Conv2D(name='di_conv_12',
                              filters=64,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_13 = Conv2D(name='di_conv_13',
                              filters=64,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_1 = MaxPooling2D(name='di_pool_1',
                                   pool_size=(5, 5),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_1 = Dropout(0.5)

        # ---- Stage 2: 128 filters ----
        self.conv_21 = Conv2D(name='di_conv_21',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_22 = Conv2D(name='di_conv_22',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_23 = Conv2D(name='di_conv_23',
                              filters=128,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_2 = MaxPooling2D(name='di_pool_2',
                                   pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_2 = Dropout(0.5)

        # ---- Stage 3: 256 filters (first conv downsamples with stride 2) ----
        self.conv_31 = Conv2D(name='di_conv_31',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(2, 2),
                              **arg)
        self.conv_32 = Conv2D(name='di_conv_32',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.conv_33 = Conv2D(name='di_conv_33',
                              filters=256,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              **arg)
        self.pool_3 = MaxPooling2D(name='di_pool_3',
                                   pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')
        self.drop_3 = Dropout(0.5)

        # ---- Classifier head: 512 -> 256 -> 2 (raw logits, no activation) ----
        self.flattener = Flatten()
        self.drop_4 = Dropout(0.5)
        self.classifier_1 = Dense(name='di_cls_1',
                                  units=512,
                                  activation=tf.nn.relu,
                                  use_bias=True)
        self.drop_5 = Dropout(0.5)
        self.classifier_2 = Dense(name='di_cls_2',
                                  units=256,
                                  activation=tf.nn.relu,
                                  use_bias=True)
        self.classifier_3 = Dense(name='di_cls_3',
                                  units=2,
                                  activation=None,
                                  use_bias=True)
Exemplo n.º 17
0
def conv2d_with_bn_and_act(input_, filters, k_size=[3, 3], strides=[1, 1], padding="same"):
    """Apply a relu-activated Conv2D followed by batch normalization.

    Args:
      input_: 4D input tensor (batch, height, width, channels).
      filters: number of convolution output channels.
      k_size: convolution kernel size.
      strides: convolution strides.
      padding: convolution padding mode ("same" or "valid").

    Returns:
      The batch-normalized convolution output.
    """
    # BUG FIX: `padding` was accepted but never forwarded to Conv2D, which
    # therefore silently used its own default 'valid' despite this
    # function's declared default of "same".
    conv = Conv2D(filters,
                  kernel_size=k_size,
                  strides=strides,
                  padding=padding,
                  activation=tf.nn.relu)(input_)
    return BatchNormalization()(conv)
Exemplo n.º 18
0
    'epochs': 5,
    'batch_size': 64,
    'num_channels': 64,
})
"""

NUM_CHANNELS = 64

# Module-level layer singletons: constructing each layer once means the same
# weights are reused every time the network-building function below is called.
BN1 = BatchNormalization()
BN2 = BatchNormalization()
BN3 = BatchNormalization()
BN4 = BatchNormalization()
BN5 = BatchNormalization()
BN6 = BatchNormalization()

# First two convs keep spatial size ('same' padding); the last two use the
# Conv2D default padding ('valid'), so each shrinks the map by 2 per axis.
CONV1 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1, padding='same')
CONV2 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1, padding='same')
CONV3 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1)
CONV4 = Conv2D(NUM_CHANNELS, kernel_size=3, strides=1)

# Fully connected head; FC3's 7 units presumably correspond to the 7
# Connect-Four columns (6x7 board, see comment below) -- TODO confirm.
FC1 = Dense(128)
FC2 = Dense(64)
FC3 = Dense(7)

DROP1 = Dropout(0.3)
DROP2 = Dropout(0.3)

# 6x7 input
# https://github.com/PaddlePaddle/PARL/blob/0915559a1dd1b9de74ddd2b261e2a4accd0cd96a/benchmark/torch/AlphaZero/submission_template.py#L496
def modified_cnn(inputs, **kwargs):
Exemplo n.º 19
0
    def __init__(self):
        """Build the convolutional GRU gates and the unrolled state scan.

        NOTE(review): this relies on module-level names defined elsewhere in
        the file (message_hidden_size, message_size, hidden_size,
        conv_initializer, dtype, time_step, input_layer) -- confirm they are
        in scope before this class is instantiated.
        """
        # Padding specs used to shift feature maps by one cell along axis 2
        # (pad after vs. pad before).
        self.pad0 = [[0, 0], [0, 0], [0, 1], [0, 0]]
        self.pad1 = [[0, 0], [0, 0], [1, 0], [0, 0]]

        # 1x1 convolutions forming the message MLP, applied position-wise.
        self.CM_2 = Conv2D(filters=message_hidden_size,
                           kernel_size=1,
                           activation=tf.nn.relu,
                           kernel_initializer=conv_initializer,
                           name='CM_2')
        self.CM_1 = Conv2D(filters=message_hidden_size,
                           kernel_size=1,
                           activation=tf.nn.relu,
                           kernel_initializer=conv_initializer,
                           name='CM_1')
        # Final message layer emits twice message_size channels.
        self.CM_0 = Conv2D(filters=message_size * 2,
                           kernel_size=1,
                           activation=tf.nn.relu,
                           kernel_initializer=conv_initializer,
                           name='CM_0')  # * 2

        # Weights for input vectors of shape (message_size, hidden_size).
        # Implemented as 1x1 convs so the same linear map is applied at
        # every spatial position; no activation -- the GRU equations apply
        # their own nonlinearities.
        self.Wr = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Wr')
        self.Wz = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Wz')
        self.Wh = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Wh')

        # Weights for hidden vectors of shape (hidden_size, hidden_size).
        self.Ur = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Ur')
        self.Uz = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Uz')
        self.Uh = Conv2D(filters=hidden_size,
                         kernel_size=1,
                         activation=None,
                         kernel_initializer=conv_initializer,
                         use_bias=True,
                         name='C_Uh')

        # Biases for hidden vectors of shape (hidden_size,) -- superseded by
        # use_bias=True on the convs above, kept here for reference.
        # self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='br')
        # self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='bz')
        # self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='bh')
        # self.Br = tf.tile(self.br, [input_dimensions, input_dimensions, 1], name='C_Br')
        # self.Bz = tf.tile(self.bz, [input_dimensions, input_dimensions, 1], name='C_Bz')
        # self.Bh = tf.tile(self.bh, [input_dimensions, input_dimensions, 1], name='C_Bh')

        # Dummy per-step input: tf.scan only needs `time_step` iterations,
        # the actual recurrence reads state, not X.
        self.X = tf.ones([time_step, 1])
        # Zero initial hidden state shaped like input_layer x hidden_size.
        self.H_0 = tf.tensordot(input_layer,
                                tf.zeros(dtype=dtype, shape=(1, hidden_size)),
                                axes=1,
                                name='H_0')
        # Unroll the GRU for time_step iterations; H_ts stacks every state.
        self.H_ts = tf.scan(self.forward_pass,
                            self.X,
                            initializer=self.H_0,
                            name='H_ts')
        # Keep only the final state (slice the last step, drop the time axis).
        self.H_cur = tf.squeeze(tf.slice(self.H_ts,
                                         [time_step - 1, 0, 0, 0, 0],
                                         [1, -1, -1, -1, -1]),
                                axis=0,
                                name='H_cur')
Exemplo n.º 20
0
NAME = "CATS_VS_DOGS_CNN"

# Cap TF's per-process GPU memory so other processes can share the card.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
# BUG FIX: was `tf.Sesseion` (typo) -- raised AttributeError at import time.
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

# Load the pre-pickled dataset; context managers close the files promptly
# instead of leaking the handles.
with open("X.pickle", "rb") as pickle_in:
    X = pickle.load(pickle_in)

with open("y.pickle", "rb") as pickle_in:
    y = pickle.load(pickle_in)

# Scale pixel values from [0, 255] into [0, 1].
X = X / 255.0

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dense(64))

# Single sigmoid unit: binary cat-vs-dog probability.
model.add(Dense(1))
model.add(Activation("sigmoid"))

tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
Exemplo n.º 21
0
def MobilenetV2(inputs, alpha=1.0, num_classes=1000, include_top=True):
    """Build a MobilenetV2 network.

    Args:
      inputs: 4D image tensor.
      alpha: width multiplier applied to every layer's channel count.
      num_classes: size of the softmax head (used when include_top=True).
      include_top: when True, append the 7x7 average-pool + softmax head.

    Returns:
      Class probabilities of shape (batch, num_classes) when include_top
      is True, otherwise the final feature map.
    """
    # Bottleneck schedule: (filters, stride, expansion, block_id, residual).
    # Mirrors the canonical MobilenetV2 layout: each resolution stage starts
    # with a non-residual (often strided) block followed by residual ones.
    bottleneck_specs = (
        (16, 1, 1, 0, False),
        (24, 2, 6, 1, False),
        (24, 1, 6, 2, True),
        (32, 2, 6, 3, False),
        (32, 1, 6, 4, True),
        (32, 1, 6, 5, True),
        (64, 2, 6, 6, False),
        (64, 1, 6, 7, True),
        (64, 1, 6, 8, True),
        (64, 1, 6, 9, True),
        (96, 1, 6, 10, False),
        (96, 1, 6, 11, True),
        (96, 1, 6, 12, True),
        (160, 2, 6, 13, False),
        (160, 1, 6, 14, True),
        (160, 1, 6, 15, True),
        (320, 1, 6, 16, False),
    )

    with tf.variable_scope('MobilenetV2'):
        # Stem: strided 3x3 conv -> BN -> relu6.
        with tf.variable_scope('Conv'):
            stem_filters = _make_divisible(32 * alpha, 8)
            x = Conv2D(stem_filters,
                       3,
                       strides=2,
                       padding='same',
                       use_bias=False,
                       name='Conv2D')(inputs)
            x = BatchNormalization(epsilon=1e-3,
                                   momentum=0.999,
                                   name='BatchNorm')(x)
            x = Activation(relu6)(x)

        # Stacked inverted-residual bottlenecks (all use a 3x3 depthwise).
        for filters, stride, expansion, block_id, residual in bottleneck_specs:
            x = inverted_residuals(x,
                                   filters,
                                   3,
                                   stride=stride,
                                   expansion=expansion,
                                   block_id=block_id,
                                   alpha=alpha,
                                   residual=residual)

        # Final 1x1 channel expansion before the classification head.
        x = Conv2D(_make_divisible(1280 * alpha, 8), 1, use_bias=False)(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
        x = Activation(relu6)(x)

        if include_top:
            with tf.variable_scope('Predictions'):
                x = AvgPool2D((7, 7))(x)
                x = Conv2D(num_classes, 1, activation='softmax',
                           use_bias=True)(x)
                x = Reshape((num_classes, ), name='Reshape_1')(x)
        return x
Exemplo n.º 22
0
def inverted_residuals(inputs,
                       filters,
                       kernel,
                       stride,
                       expansion=1,
                       alpha=1.0,
                       atrous_rate=1,
                       residual=True,
                       block_id=None):
    """Inverted residual block (MobileNetV2-style).

    Pipeline:
      1x1 expand Conv -> BN -> relu6 ->
      kernel x kernel depthwise Conv -> BN -> relu6 ->
      1x1 projection Conv -> BN (-> relu6, see NOTE in the project stage),
    with an optional identity shortcut from `inputs`.

    Args:
      inputs: tf.float32 4D tensor, channels-last.
      filters: base number of output channels (scaled by `alpha`).
      kernel: depthwise convolution kernel size.
      stride: depthwise convolution stride.
      expansion: channel multiplier for the 1x1 expansion conv.
      alpha: width multiplier applied to `filters`.
      atrous_rate: dilation rate for the depthwise conv.
      residual: when True, add the identity shortcut. The caller must
        guarantee stride == 1 and matching channel counts, or the Add fails.
      block_id: integer used in the variable-scope name. Falsy values
        (None or 0) reuse the unnumbered 'expanded_conv' scope and skip
        the expansion stage.

    Returns:
      tf.float32 4D output tensor of the block.
    """
    # NOTE: block_id == 0 is falsy, so block 0 gets the plain
    # 'expanded_conv' scope and no expansion -- this matches the canonical
    # MobileNetV2 first block (expansion factor 1).
    scope = 'expanded_conv_' + str(block_id) if block_id else 'expanded_conv'
    with tf.variable_scope(scope):
        # #######################################################
        # Expand and Pointwise
        # #######################################################
        if block_id:
            with tf.variable_scope('expand'):
                # Widen channels by `expansion` before the depthwise step.
                in_channels = inputs.get_shape().as_list()[-1]
                x = Conv2D(filters=expansion * in_channels,
                           kernel_size=1,
                           padding='SAME',
                           use_bias=False,
                           activation=None)(inputs)
                x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
                x = Activation(relu6)(x)
        else:
            x = inputs
        # ########################################################
        # Depthwise
        # ########################################################
        with tf.variable_scope('depthwise'):
            x = DepthwiseConv2D(kernel_size=kernel,
                                strides=stride,
                                activation=None,
                                use_bias=False,
                                dilation_rate=(atrous_rate, atrous_rate),
                                padding='SAME')(x)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
            x = Activation(relu6)(x)

        # ########################################################
        # Linear Projection
        # ########################################################
        with tf.variable_scope('project'):
            pointwise_filters = int(filters * alpha)
            # Round channels to a multiple of 8 (hardware-friendly widths,
            # same convention as the reference MobileNetV2 implementation).
            pointwise_filters = _make_divisible(pointwise_filters,
                                                8)  # Why 8???
            x = Conv2D(filters=pointwise_filters,
                       kernel_size=1,
                       padding='SAME',
                       use_bias=False,
                       activation=None)(x)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
            # NOTE(review): canonical MobileNetV2 uses a *linear* bottleneck
            # (no activation after the projection conv); this relu6 deviates
            # from the paper -- confirm it is intentional before removing.
            x = Activation(relu6)(x)
            if residual:
                x = Add()([inputs, x])

            return x
Exemplo n.º 23
0
import tensorflow as tf
# BUG FIX: the layer classes live in tensorflow.keras.layers (not
# tensorflow.layers), and 'Flattem' was a typo for Flatten.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D

# Value network for a 6x7 (Connect-Four-sized) board.
# BUG FIX: Sequential expects one list of layers, not separate positional
# arguments -- the original call raised a TypeError.
model = Sequential([
    Conv2D(128, (4, 4),
           padding='same',
           activation='relu',
           input_shape=(6, 7, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    # BUG FIX: without Flatten (imported but unused in the original) the
    # Dense layers act per spatial position and the model emits a
    # (3, 3, 1) map instead of a single scalar value per board.
    Flatten(),
    Dense(64, activation='relu'),
    Dense(64, activation='relu'),
    Dense(1),
])

optimizer = tf.keras.optimizers.Adam()

model.compile(loss='mean_squared_error',
              optimizer=optimizer,
              metrics=['accuracy'])
Exemplo n.º 24
0
                        [-1, input_dimensions, input_dimensions_m1, size])
        ver0 = tf.slice(edge, [0, input_dimensions, 0, 0],
                        [-1, input_dimensions, input_dimensions_m1, size])
        ver1 = tf.slice(edge, [0, input_dimensions, 0, size],
                        [-1, input_dimensions, input_dimensions_m1, size])
        hor_ = tf.pad(hor0, self.pad0) + tf.pad(hor1, self.pad1)
        ver_ = tf.pad(ver0, self.pad0) + tf.pad(ver1, self.pad1)
        return tf.add(hor_, tf.transpose(ver_, [0, 2, 1, 3]), name=name)


# Instantiate the recurrent graph network and read its final hidden state.
main_layers = GNN_GRU()
H_cur = main_layers.get_current_state()

# Readout MLP implemented as position-wise 1x1 convolutions:
# hidden state -> two relu layers -> per-position softmax over labels.
# NOTE(review): message_hidden_size, label_size and conv_initializer are
# module-level names defined elsewhere in this file.
map_2 = Conv2D(filters=message_hidden_size,
               kernel_size=1,
               activation=tf.nn.relu,
               kernel_initializer=conv_initializer,
               name='CR_2')(H_cur)
map_1 = Conv2D(filters=message_hidden_size,
               kernel_size=1,
               activation=tf.nn.relu,
               kernel_initializer=conv_initializer,
               name='CR_1')(map_2)
map_0 = Conv2D(filters=label_size,
               kernel_size=1,
               activation=tf.nn.softmax,
               kernel_initializer=conv_initializer,
               name='CR_0')(map_1)
output_layer = map_0  # tf.subtract(1., map_0, name='soft_output') #
label_layer = tf.cast(tf.argmax(output_layer, axis=-1),
                      dtype=tf.int32,
Exemplo n.º 25
0
def Simple_VGG_19(inputs, num_classes):
    """Slimmed-down VGG-style classifier.

    Four stages of two same-padded relu 3x3 convolutions followed by a
    2x2 max pool (channels 32 -> 64 -> 128 -> 256), then a fully
    connected head.

    Args:
      inputs: tf.float32 4D image tensor, channels-last.
      num_classes: number of output classes.

    Returns:
      Raw (unnormalized) class logits of shape (batch, num_classes).
    """
    outputs = inputs
    # Each stage doubles the channel count and halves the spatial size.
    for stage_filters in (32, 64, 128, 256):
        for _ in range(2):
            outputs = Conv2D(filters=stage_filters,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             activation="relu",
                             padding="SAME")(outputs)
        outputs = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(outputs)

    outputs = tf.layers.Flatten()(outputs)

    # Fully connected head (much smaller than VGG-19's 4096-wide layers).
    outputs = tf.layers.Dense(512, activation="relu")(outputs)
    outputs = tf.layers.Dense(512, activation="relu")(outputs)
    outputs = tf.layers.Dense(64, activation="relu")(outputs)
    # BUG FIX: the final layer used activation="relu", which zeroes out all
    # negative logits and cripples softmax cross-entropy training; the
    # output layer must emit raw logits.
    outputs = tf.layers.Dense(num_classes, activation=None)(outputs)

    return outputs
Exemplo n.º 26
0
    def __init__(self):
        """Create the decoder's upsampling stack.

        Three stages of transposed convolutions (each stride-2 deconv
        doubles the spatial size) interleaved with stride-1 convs that
        refine features while reducing channels 512 -> 256 -> 128 -> 64,
        ending in a 3-channel (RGB) output conv.
        """
        super(Decoder, self).__init__()
        # Shared kwargs: relu activation + 'same' padding for every layer
        # except the final output conv (defined without activation below).
        arg = {'activation': tf.nn.relu, 'padding': 'same'}

        # Stage 1: two 2x upsamplings at 512 channels, then project to 256.
        self.deconv_11 = Conv2DTranspose(name='d_deconv_11',
                                         filters=512,
                                         kernel_size=(5, 5),
                                         strides=(2, 2),
                                         **arg)
        self.deconv_12 = Conv2DTranspose(name='d_deconv_12',
                                         filters=512,
                                         kernel_size=(5, 5),
                                         strides=(2, 2),
                                         **arg)
        self.deconv_13 = Conv2D(name='d_deconv_13',
                                filters=256,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                **arg)
        # self.batch_norm_1 = BatchNormalization(name='d_bn_1')
        # self.drop_1  = Dropout(name='d_drop_1', rate=0.5)

        # Stage 2: two more 2x upsamplings at 256 channels, project to 128.
        self.deconv_21 = Conv2DTranspose(name='d_deconv_21',
                                         filters=256,
                                         kernel_size=(5, 5),
                                         strides=(2, 2),
                                         **arg)
        self.deconv_22 = Conv2DTranspose(name='d_deconv_22',
                                         filters=256,
                                         kernel_size=(5, 5),
                                         strides=(2, 2),
                                         **arg)
        self.deconv_23 = Conv2D(name='d_deconv_23',
                                filters=128,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                **arg)
        # self.batch_norm_2 = BatchNormalization(name='d_bn_2')
        # self.drop_2  = Dropout(name='d_drop_2', rate=0.5)

        # Stage 3: one final 2x upsampling, refine at 64 channels.
        self.deconv_31 = Conv2DTranspose(name='d_deconv_31',
                                         filters=128,
                                         kernel_size=(5, 5),
                                         strides=(2, 2),
                                         **arg)
        self.deconv_32 = Conv2D(name='d_deconv_32',
                                filters=64,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                **arg)
        self.deconv_33 = Conv2D(name='d_deconv_33',
                                filters=64,
                                kernel_size=(3, 3),
                                strides=(1, 1),
                                **arg)
        # self.batch_norm_3 = BatchNormalization(name='d_bn_3')
        # self.drop_3  = Dropout(name='d_drop_3', rate=0.5)

        # Output conv: 3 channels (RGB), deliberately no activation here
        # (note it does NOT use **arg).
        self.deconv_4 = Conv2D(name='d_deconv_4',
                               filters=3,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               padding='same')