Example #1
def attention_gate(hi_input, lo_input, filters, keep_prob_):
    # tflearn's global_avg_pool returns [batch, channels]; reshape to
    # [batch, 1, 1, channels] so the 1x1 convolutions below accept 4-D input.
    hi_channel_att = global_avg_pool(hi_input, name='hi_channel_att')
    hi_channel_att = tf.reshape(hi_channel_att, [-1, 1, 1, hi_input.get_shape()[-1].value])
    lo_channel_att = global_avg_pool(lo_input, name='lo_channel_att')
    lo_channel_att = tf.reshape(lo_channel_att, [-1, 1, 1, lo_input.get_shape()[-1].value])
    hi_channel_att = tf.layers.conv2d(hi_channel_att,
                                      filters,
                                      1,
                                      padding='same',
                                      kernel_initializer='he_normal')
    hi_channel_att = tf.nn.relu(hi_channel_att)
    lo_channel_att = tf.layers.conv2d(lo_channel_att,
                                      filters,
                                      1,
                                      padding='same',
                                      kernel_initializer='he_normal')
    lo_channel_att = tf.nn.relu(lo_channel_att)
    att = tf.add(hi_channel_att, lo_channel_att)
    att = tf.layers.conv2d(att,
                           filters,
                           1,
                           padding='same',
                           kernel_initializer='he_normal')
    att = tf.nn.softmax(att)
    output = att * lo_input
    output = tf.concat([hi_input, output], 3)
    return output
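A minimal usage sketch for the gate above, assuming TF 1.x and tflearn's global_avg_pool; the placeholder shapes and filter count are illustrative:

import tensorflow as tf
from tflearn.layers.conv import global_avg_pool

hi = tf.placeholder(tf.float32, [None, 32, 32, 64])  # decoder feature map
lo = tf.placeholder(tf.float32, [None, 32, 32, 64])  # encoder skip connection
# Concatenates hi with the attended lo along channels -> [None, 32, 32, 128].
fused = attention_gate(hi, lo, filters=64, keep_prob_=1.0)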
Example #2
def selective_kernel_layer(sk_conv1, sk_conv2, sk_conv3, middle, out_dim):
    sum_u = sk_conv1 + sk_conv2 + sk_conv3
    squeeze = global_avg_pool(sum_u)
    squeeze = tf.reshape(squeeze, [-1, 1, 1, out_dim])
    z = tf.layers.dense(squeeze, use_bias=True, units=middle)
    z = tf.nn.relu(z)
    a1 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a2 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a3 = tf.layers.dense(z, use_bias=True, units=out_dim)

    before_softmax = tf.concat([a1, a2, a3], 1)
    after_softmax = tf.nn.softmax(before_softmax, axis=1)
    a1 = after_softmax[:, 0, :, :]
    a1 = tf.reshape(a1, [-1, 1, 1, out_dim])
    a2 = after_softmax[:, 1, :, :]
    a2 = tf.reshape(a2, [-1, 1, 1, out_dim])
    a3 = after_softmax[:, 2, :, :]
    a3 = tf.reshape(a3, [-1, 1, 1, out_dim])

    select_1 = sk_conv1 * a1
    select_2 = sk_conv2 * a2
    select_3 = sk_conv3 * a3

    out = select_1 + select_2 + select_3

    return out
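A minimal usage sketch for selective_kernel_layer, assuming TF 1.x; the three branches, shapes, and middle width are illustrative:

import tensorflow as tf
from tflearn.layers.conv import global_avg_pool

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
# Three candidate branches with different receptive fields.
conv3 = tf.layers.conv2d(x, 64, 3, padding='same')
conv5 = tf.layers.conv2d(x, 64, 5, padding='same')
conv7 = tf.layers.conv2d(x, 64, 7, padding='same')
fused = selective_kernel_layer(conv3, conv5, conv7, middle=16, out_dim=64)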
Example #3
def se_net_res(input_x, ratio, layer_name, is_training, bn_decay):
    with tf.name_scope(layer_name):
        out_dim = input_x.get_shape()[-1].value
        squeeze = global_avg_pool(input_x)
        squeeze = tf.reshape(squeeze, [-1, 1, out_dim])
        excitation = tf_util.conv1d(squeeze,
                                    out_dim // ratio,
                                    1,
                                    padding='SAME',
                                    bn=True,
                                    is_training=is_training,
                                    scope=layer_name + 'fc1',
                                    bn_decay=bn_decay)
        excitation = tf.nn.relu(excitation)
        excitation = tf_util.conv1d(excitation,
                                    out_dim,
                                    1,
                                    padding='SAME',
                                    bn=True,
                                    is_training=is_training,
                                    scope=layer_name + 'fc2',
                                    bn_decay=bn_decay)
        excitation = tf.nn.sigmoid(excitation)
        excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
        scale = input_x * excitation
        scale = scale + input_x
        return scale
Example #4
def Global_Average_Pooling(x, stride=1):
    # width = np.shape(x)[1]
    # height = np.shape(x)[2]
    # pool_size = [width, height]
    # return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)

    return global_avg_pool(x, name='Global_avg_pooling')
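The commented-out alternative above sizes an average-pooling window to the full feature map; an equivalent tflearn-free sketch (TF 1.x assumed) averages over the spatial axes directly:

import tensorflow as tf

def global_average_pooling_tf(x):
    # Average an NHWC tensor over height and width, matching
    # tflearn's global_avg_pool output shape of [batch, channels].
    return tf.reduce_mean(x, axis=[1, 2])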
Example #5
def mobile_net_v2(input_shape, n_classes, img_prep=None, img_aug=None):
	"""MobileNetv2
	This function defines the MobileNetV2 architecture.

	Parameters
	----------
		input_shape: An integer or tuple/list of 3 integers, shape
			of input tensor.
		n_classes: Number of classes.
		img_prep: Function handle for image pre-processing
		img_aug: Function handle for image augmentation

	# Returns
		MobileNetv2 model.
	"""
	inputs = input_data(shape=input_shape, data_preprocessing=img_prep, data_augmentation=img_aug)
	x = reshape(inputs, [-1, input_shape[0], input_shape[1], 1])
	x = _conv_block(x, 32, (3, 3), strides=(2, 2))

	x = _inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1)
	x = _inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2)
	x = _inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3)
	x = _inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4)
	x = _inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3)
	x = _inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3)
	x = _inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1)
	x = _conv_block(x, 1280, (1, 1), strides=(1, 1))
	x = global_avg_pool(x)
	x = reshape(x, [-1, 1, 1, 1280])
	x = dropout(x, 0.3, name='Dropout')
	x = conv_2d(x, n_classes, (1, 1), padding='same', activation='softmax', weights_init='xavier')

	output = reshape(x, [-1, n_classes])
	return output
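A hypothetical training setup for the builder above (TFLearn assumed; the (96, 96) input size and class count are illustrative, since the function reshapes its input to [-1, input_shape[0], input_shape[1], 1]):

import tflearn

net = mobile_net_v2(input_shape=(96, 96), n_classes=10)
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)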
Example #6
    def build_SEnet(self, input_x):
        input_x = self.conv_bn_layer(
            input_x,
            filters=self.out_dims[0],
            filter_size=3,
            stride=1,
            scope="first_layer")
        print(input_x)

        for i, out_dim in enumerate(self.out_dims[1:]):
            x = self.residual_layer(
                (x if i else input_x),
                out_dim=out_dim,
                num_block=self.num_block,
                depth=self.depth,
                cardinality=self.cardinality,
                reduction_ratio=self.reduction_ratio,
                layer_num=str(i+1))
            print(x)

        x = global_avg_pool(x)
        print(x)
        x = flatten(x)
        print(x)
        return tf.layers.dense(inputs=x, use_bias=False, units=9)
Example #7
def alchNet19(img_prep, img_aug, learning_rate):
    network = input_data(shape=[None, 64, 64, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    network = conv_2d(network, 64, 3, activation='relu')
    network = batch_normalization(network)

    network = resLayer(network, 64)
    network = resLayer(network, 64)
    network = resLayer(network, 128, stride=2)
    network = resLayer(network, 128)
    network = resLayer(network, 256, stride=2)
    network = resLayer(network, 256)
    network = resLayer(network, 512, stride=2)
    network = resLayer(network, 512)
    network = global_avg_pool(network)
    network = fully_connected(network, 1024, activation='relu')
    network = batch_normalization(network, stddev=0.002, trainable=True, restore=True, reuse=False)
    network = dropout(network, 0.5)
    network = fully_connected(network, 200, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=learning_rate)
    return network
Example #8
def squeeze_excitation_layer(input_x, out_dim, middle):
    squeeze = global_avg_pool(input_x)
    excitation = tf.layers.dense(squeeze, use_bias=True, units=middle)
    excitation = tf.nn.relu(excitation)
    excitation = tf.layers.dense(excitation, use_bias=True, units=out_dim)
    excitation = tf.nn.sigmoid(excitation)
    excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
    scale = input_x * excitation
    return scale
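A minimal application sketch for the layer above (TF 1.x assumed; the shape and reduction are illustrative). The output keeps the input's shape, with each channel rescaled by its learned sigmoid gate:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 16, 64])
se_out = squeeze_excitation_layer(x, out_dim=64, middle=64 // 16)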
Example #9
def squeeze_and_excitation(input_x, ratio=16):
    x = global_avg_pool(input_x)
    x = tf.layers.dense(x, units=input_x.shape[3] // ratio)
    x = tf.nn.relu(x)
    x = tf.layers.dense(x, units=input_x.shape[3])
    x = tf.nn.sigmoid(x)
    x = tf.reshape(x, [-1, 1, 1, input_x.shape[3]])
    scale = x * input_x
    return scale
Example #10
    def create(self, is_training=False):

        with tf.variable_scope(self.scope, reuse=self.reuse):
            with slim.arg_scope([slim.fully_connected],
                                activation_fn=tf.nn.relu):
                with slim.arg_scope([slim.conv2d],
                                    activation_fn=tf.nn.relu,
                                    padding='VALID'):
                    net = self.inputs
                    net = slim.conv2d(net, 64, 5, scope='conv1')
                    self.conv1 = net
                    net = slim.max_pool2d(net, 2, stride=2, scope='pool1')
                    self.pool1 = net
                    net = slim.conv2d(net, 128, 5, scope='conv2')
                    self.conv2 = net
                    net = slim.max_pool2d(net, 2, stride=2, scope='pool2')
                    self.pool2 = net
                    self.f = tf.contrib.layers.flatten(net)
                    ###############################################################################
                    self.avg = global_avg_pool(net, name="Global_avg_pooling")
                    excitation = slim.fully_connected(self.avg,
                                                      128 // 16,
                                                      activation_fn=tf.nn.relu,
                                                      scope='atten_fc1')

                    excitation = slim.fully_connected(
                        excitation,
                        128,
                        activation_fn=tf.nn.sigmoid,
                        scope='atten_fc2')

                    self.atten_weight = tf.reshape(excitation, [-1, 1, 1, 128])
                    net = net * self.atten_weight
                    self.cmp = net
                    self.att_flat = tf.contrib.layers.flatten(self.cmp)
                    ###############################################################################
                    net = tf.contrib.layers.flatten(self.pool2)

                    net = slim.fully_connected(net,
                                               1024,
                                               activation_fn=tf.nn.relu,
                                               scope='fc3')
                    self.fc3 = net
                    net = slim.dropout(net,
                                       0.5,
                                       is_training=self.training_flag)
                    net = slim.fully_connected(net,
                                               64,
                                               activation_fn=tf.tanh,
                                               scope='fc4')
                    self.fc4 = net
                    net = slim.fully_connected(net,
                                               10,
                                               activation_fn=None,
                                               scope='fc5')
                    self.fc5 = net
                    self.softmax_output = slim.softmax(net, scope='prediction')
Example #11
def Global_Average_Pooling(x,stride=1):
    """
    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width,height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    # The stride value does not matter; it is global average pooling without tflearn.
    """
    return global_avg_pool(x,name='Global_avg_pooling')
Example #12
    def build_network(self, loadModel=False):
        """
            构建模型
        """
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        img_aug = ImageAugmentation()
        img_aug.add_random_flip_leftright()
        # img_aug.add_random_rotation(max_angle=25.)
        img_aug.add_random_blur(sigma_max=0.3)
        # Input data http://tflearn.org/layers/core/#input-data
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1], data_augmentation=img_aug)
        # self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        # Convolutional layer http://tflearn.org/layers/conv/#convolution-2d
        # Activation functions http://tflearn.org/activations/
        self.network = conv_2d(self.network, 64, 3, activation='relu')
        # self.gap1 = global_avg_pool(self.network)
        # Pooling layer http://tflearn.org/layers/conv/#max-pooling-2d
        self.network = max_pool_2d(self.network, 2, strides=2)
        # Convolutional layer
        self.network = conv_2d(self.network, 96, 3, activation='relu')
        # self.gap2 = global_avg_pool(self.network)
        # Pooling layer
        self.network = max_pool_2d(self.network, 2, strides=2)
        # Convolutional layer
        self.network = conv_2d(self.network, 128, 3, activation='relu')
        self.network = global_avg_pool(self.network)
        # Fully connected layer http://tflearn.org/layers/core/#fully-connected
        self.network = fully_connected(self.network, 2048, activation='relu',
            weight_decay=0.001)

        # Dropout randomly zeroes part of the outputs to avoid overfitting http://tflearn.org/layers/core/#dropout
        self.network = dropout(self.network, 0.8)
        # Fully connected layer: softmax classification
        # self.network = merge([self.gap1, self.gap2, self.gap3], mode="concat", name="concat")
        self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')

        # Define the loss function and optimizer http://tflearn.org/layers/estimator/#regression
        self.network = regression(self.network,
            # http://tflearn.org/optimizers/
            optimizer='Adam',
            # optimizer='SGD',
            # http://tflearn.org/objectives/
            loss='categorical_crossentropy',
            learning_rate=0.001)
        # Define the model http://tflearn.org/models/dnn/#deep-neural-network-model
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
            tensorboard_dir='c:\\tmp\\tflearn_logs',
            max_checkpoints=1,
            tensorboard_verbose=2
        )
        if loadModel:
            self.load_model()
Example #13
def Global_Average_Pooling(x, stride=1):
    """
    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    # The stride value does not matter; it is global average pooling without tflearn.
    """

    return global_avg_pool(x, name='Global_avg_pooling')
Example #14
def global_average_pool(x, name='global_avg_pooling'):
    """
    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    """
    net = global_avg_pool(x, name=name)
    print('{}: {}'.format(name, net.get_shape()))
    return net
Example #15
def construct_inceptionv4onfire(x, y, training=False, enable_batch_norm=True):

    network = input_data(shape=[None, y, x, 3])

    #stem of inceptionV4

    conv1_3_3 = conv_2d(network,32,3,strides=2,activation='relu',name='conv1_3_3_s2',padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3,32,3,activation='relu',name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3,64,3,activation='relu',name='conv3_3_3')
    b_conv_1_pool = max_pool_2d(conv3_3_3,kernel_size=3,strides=2,padding='valid',name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3,96,3,strides=2,padding='valid',activation='relu',name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv,b_conv_1_pool],mode='concat',axis=3)

    b_conv4_1_1 = conv_2d(b_conv_1,64,1,activation='relu',name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1,96,3,padding='valid',activation='relu',name='conv5_3_3')

    b_conv4_1_1_reduce = conv_2d(b_conv_1,64,1,activation='relu',name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce,64,[1,7],activation='relu',name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7,64,[7,1],activation='relu',name='b_conv4_7_1')
    b_conv4_3_3_v = conv_2d(b_conv4_7_1,96,3,padding='valid',name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3],mode='concat',axis=3)

    b_conv5_3_3 = conv_2d(b_conv_4,192,3,padding='valid',activation='relu',name='b_conv5_3_3',strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4,kernel_size=3,padding='valid',strides=2,name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3,b_pool5_3_3],mode='concat',axis=3)
    net = b_conv_5

    # inceptionV4 modules

    net=inception_block_a(net)

    net=inception_block_b(net)

    net=inception_block_c(net)

    pool5_7_7=global_avg_pool(net)
    if training:
        pool5_7_7=dropout(pool5_7_7,0.4)
    loss = fully_connected(pool5_7_7, 2,activation='softmax')

    if training:
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network=loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)

    return model
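A hypothetical inference call for the builder above (TFLearn assumed; the input size and checkpoint name are illustrative):

model = construct_inceptionv4onfire(224, 224, training=False)
model.load('inceptionv4onfire.tflearn')  # hypothetical checkpoint file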
Example #16
def SE_layer(x, channel, reduction=4):
    with tf.name_scope('SElayer'):
        avg_pool = global_avg_pool(x, name='Global_avg_pool')
        fc1 = tf.layers.dense(inputs=avg_pool,
                              use_bias=True,
                              units=channel // reduction)
        ReLU = tf.nn.relu(fc1)
        fc2 = tf.layers.dense(inputs=ReLU, use_bias=True, units=channel)
        Sigmoid = tf.nn.sigmoid(fc2)
        excitation = tf.reshape(Sigmoid, [-1, 1, 1, channel])
        return x * excitation
Example #17
def Global_Average_Pooling(x, stride=1):
    """
    图像的行和列数据集合
    width=np.shape(x)[1]
    height=np.shape(x)[2]
    :param x:
    :param stride:
    :return:
    下面使用h5对其进行相应的存储
    """
    return global_avg_pool(x,name='Global_Average_Pooling')
Example #18
def channel_attention(input_x, out_dim, ratio, layer_name):
    with tf.name_scope(layer_name):
        squeeze = global_avg_pool(input_x, name='Global_avg_pooling')
        excitation = tf.layers.dense(
            inputs=squeeze, use_bias=False, units=out_dim // ratio, name=layer_name+'_'+'dense1')
        excitation = tf.nn.relu(excitation)
        excitation = tf.layers.dense(
            inputs=excitation, use_bias=False, units=out_dim, name=layer_name+'_'+'dense2')
        excitation = tf.nn.sigmoid(excitation)
        excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
        scale = input_x * excitation
    return scale
Example #19
class GoogLeNet:
    network = input_data(shape=[None, 1024, 1024, 1])

    network = conv_2d(network, 64, 9, strides=4, activation='relu', bias=False)
    '''Bottleneck'''
    network = tflearn.residual_bottleneck(network,
                                          nb_blocks=3,
                                          bottleneck_size=16,
                                          out_channels=64)
    network = tflearn.residual_bottleneck(network,
                                          nb_blocks=1,
                                          bottleneck_size=32,
                                          out_channels=128,
                                          downsample=True)
    network = tflearn.residual_bottleneck(network,
                                          nb_blocks=2,
                                          bottleneck_size=32,
                                          out_channels=128)
    network = tflearn.residual_bottleneck(network,
                                          nb_blocks=1,
                                          bottleneck_size=64,
                                          out_channels=256,
                                          downsample=True)
    network = tflearn.residual_bottleneck(network,
                                          nb_blocks=2,
                                          bottleneck_size=64,
                                          out_channels=256)
    network = batch_normalization(network)
    network = tflearn.activation(network, 'relu')
    network = global_avg_pool(network)
    '''Output layer'''
    output = fully_connected(network, 15, activation='sigmoid')

    network = regression(output,
                         optimizer='momentum',
                         loss='binary_crossentropy',
                         learning_rate=0.01)
    '''Set model + Save parameters + Tensorboard'''
    model = tflearn.DNN(network,
                        checkpoint_path='params_resnet_cxr',
                        max_checkpoints=1,
                        tensorboard_verbose=0)
    '''Feed the oxflowers17 dataset to the model'''
    model.fit(train_x,
              train_t,
              n_epoch=10,
              validation_set=(test_x, test_t),
              show_metric=True,
              batch_size=16,
              snapshot_epoch=False,
              snapshot_step=100,
              run_id='resnet_cxr')
Example #20
def ch_attn(x):
    batch_size, height, width, ch = x.get_shape().as_list()

    squeeze = global_avg_pool(x, name='Global_avg_pooling')
    excitation = Fully_connected(squeeze, units=ch // 16, layer_name='_fully_connected1')
    excitation = lrelu(excitation, 0.2)
    excitation = Fully_connected(excitation, units=ch, layer_name='_fully_connected2')
    excitation = tf.nn.sigmoid(excitation)

    excitation = tf.reshape(excitation, [-1,1,1,ch])

    scale = x * excitation

    return scale
Example #21
def exit_flow(input_data, num_classes):
    net = slim.separable_conv2d(input_data, 728, [3, 3], depth_multiplier=1)
    net = slim.separable_conv2d(net, 1024, [3, 3], depth_multiplier=1)

    net = slim.conv2d(net, 1024, [2, 2], 2)

    net1 = slim.conv2d(input_data, 1024, [1, 1], 2)
    net = tf.add(net, net1)
    net = slim.separable_conv2d(net, 1536, [3, 3], depth_multiplier=1)
    net = slim.separable_conv2d(net, 2048, [3, 3], depth_multiplier=1)
    net = global_avg_pool(net)
    net = slim.fully_connected(net, num_classes)

    net = slim.softmax(net)
    return net
Example #22
def ANN(WIDTH, HEIGHT, CHANNELS, LABELS):
    dropout_value = 0.35

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Building the network
    network = input_data(shape=[None, WIDTH, HEIGHT, CHANNELS],
                         data_preprocessing=img_prep,
                         name='input')

    network = conv_2d(network, 64, 3, activation='relu', bias=False)
    # Residual blocks'

    network = residual_bottleneck(network, 3, 16, 64)
    network = residual_bottleneck(network, 1, 32, 128, downsample=True)
    network = residual_bottleneck(network, 2, 32, 128)
    network = residual_bottleneck(network, 1, 64, 256, downsample=True)
    network = residual_bottleneck(network, 2, 64, 256)
    network = residual_bottleneck(network, 1, 128, 512, downsample=True)
    network = residual_bottleneck(network, 2, 128, 512)

    network = batch_normalization(network)
    network = activation(network, 'relu')
    network = global_avg_pool(network)

    # Output layer
    network = fully_connected(network, LABELS, activation='softmax')

    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.01,
                         loss='categorical_crossentropy',
                         name='target')
    '''
	# Regression
	network = regression(network, optimizer = 'momentum',
	                         loss  = 'categorical_crossentropy',
	                         learning_rate = 0.1)
	'''
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        tensorboard_dir='./logs',
                        best_checkpoint_path='./checkpoints/best/best_val',
                        max_checkpoints=1)
    return model
Example #23
    def squeeze_excitation_layer(self, input_x, out_dim, reduction_ratio,
                                 layer_name):
        with tf.name_scope(layer_name):
            pool = global_avg_pool(input_x)
            squeeze = tf.layers.dense(
                pool,
                use_bias=False,
                units=out_dim // reduction_ratio,
            )
            squeeze = tf.nn.relu(squeeze)
            excitation = tf.layers.dense(
                squeeze, units=out_dim, use_bias=False)
            excitation = tf.nn.sigmoid(excitation)

            excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
            return input_x*excitation
Example #24
    def network(self):
        in_layer = input_data([None, 1, self.str_len * 2 + 2, 1])
        indices = in_layer[:, 0, :2, 0]

        if self.emb > 1:
            lstm1 = lstm(embedding(in_layer[:, 0, 2:, 0], 26, self.emb),
                         300,
                         return_seq=True)
        else:
            lstm1 = lstm(in_layer[:, 0, 2:, :], 300, return_seq=True)

        # lstm branch
        lstm2 = lstm(lstm1, 300, return_seq=True)
        lstm3 = lstm(lstm2, 300, return_seq=True)
        lstm4 = lstm(lstm3, 300)

        # cnn branch
        in_layer = bn(in_layer)
        conv1 = conv_2d(in_layer, 64, [1, 7], 1)
        norm1 = relu(bn(conv1))
        block1 = self.residual_block(norm1, 128, [1, 3], 2, stride=2)
        block2 = self.residual_block(block1, 256, [1, 3], 2, stride=2)
        block3 = self.residual_block(block2, 512, [1, 3], 2)
        block4 = self.residual_block(block3, 1024, [1, 3], 2)
        n_out_filters = block4.get_shape().as_list()[-1]
        gap = tf.reshape(global_avg_pool(block4), [-1, n_out_filters])

        # fully-connected branch
        fc_ind = fc(indices, 100, activation='tanh')
        fc_ind2 = fc(fc_ind, 100, activation='tanh')

        # merge lstm, conv, and fc layers
        merged = tf.concat([lstm4, gap, fc_ind2], 1)

        out = fc(merged, self.num_classes,
                 activation='softmax')  # output layer

        # describe optimization
        net = regression(out,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=self.lr)

        # build model
        model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir='.')

        return model
Example #25
def Dense_net(X, num_filter, nb_blocks):
    conv = tf.layers.conv2d(
        inputs=X, filters=2*num_filter, kernel_size=[3, 3], padding='SAME',
        strides=2, kernel_initializer=tf.contrib.layers.xavier_initializer())
    # Pool the stem convolution output (the original pooled the raw input X,
    # leaving `conv` unused).
    net = tf.layers.max_pooling2d(inputs=conv, strides=2, pool_size=[3, 3], padding='VALID')

    for idx, i in enumerate(nb_blocks):
        net = dense_block(X=net, num_filter=num_filter, nb_layers=i, layer_name='dense_'+str(idx))
        net = transition_layer(net, scope='trans_'+str(idx))

    net = tf.layers.batch_normalization(inputs=net)
    net = tf.nn.relu(net)
    net = global_avg_pool(net, name='Global_avg_pooling')
    net = tf.contrib.layers.flatten(net)
    net = tf.layers.dense(inputs=net, units=OUTPUT_SIZE, name='linear')

    return net
Example #26
def MY_ResneXt(Img, ImageSize, MiniBatchSize):

    num_classes = 10
    depth = 8
    cardinality = 32
    blocks = 3

    net = Img
    net = conv_layer_with_relu(net=net,
                               num_filters=32,
                               kernel_size=3,
                               strides=1)

    net = single_resneXt_block(net=net,
                               strides=1,
                               kernel_size=3,
                               depth=depth,
                               cardinality=cardinality)
    net = conv_layer_with_relu(net=net,
                               num_filters=128,
                               kernel_size=1,
                               strides=2)
    cardinality = 16
    net = single_resneXt_block(net=net,
                               strides=1,
                               kernel_size=3,
                               depth=depth,
                               cardinality=cardinality)

    net = global_avg_pool(net, name='Global_avg_pooling')

    net = tf.contrib.layers.flatten(net)

    # net = tf.layers.dense(inputs=net, name='layer_fc1',
    #                   units=128, activation=tf.nn.relu)
    # Construct fully connected layer
    net = tf.layers.dense(inputs=net,
                          name='layer_fc_out',
                          units=num_classes,
                          activation=None)

    prLogits = net
    prSoftMax = tf.nn.softmax(net)
    return prLogits, prSoftMax
Example #27
    def Squeeze_excitation_layer(self, input_x, out_dim, ratio, layer_name):
        with tf.name_scope(layer_name):
            squeeze = global_avg_pool(input_x, name='Global_avg_pooling')

            excitation = Fully_connected(squeeze,
                                         units=out_dim // ratio,
                                         layer_name=layer_name +
                                         '_fully_connected1')
            excitation = tf.nn.relu(excitation)
            excitation = Fully_connected(excitation,
                                         units=out_dim,
                                         layer_name=layer_name +
                                         '_fully_connected2')
            excitation = tf.nn.sigmoid(excitation)

            excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
            scale = input_x * excitation

            return scale
Example #28
def ch_attn(input, channels, scope):
    with tf.variable_scope(scope):
        squeeze = global_avg_pool(input)
        squeeze = tf.reshape(squeeze, [-1, 1, 1, channels])
        squeeze = conv(squeeze,
                       channels // 8,
                       kernel=1,
                       stride=1,
                       scope="squeeze_conv")
        excite = relu(squeeze)
        excite = conv(excite,
                      channels,
                      kernel=1,
                      stride=1,
                      scope="excite_conv")
        attn = tf.nn.sigmoid(excite)
        output = attn * input

    return output
Example #29
def Squeeze_Excitation_Block(input_x, output_chn, ratio=16):
    batch, in_depth, in_height, in_width, in_channels = [
        int(d) for d in input_x.get_shape()
    ]  # extract the size of each dimension
    Squeeze = tf.reshape(input_x,
                         (batch, in_depth, in_height * in_width, in_channels))
    Squeeze = global_avg_pool(Squeeze)
    #print("squeeze",Squeeze.shape)#1+通道数量
    Excitation = tf.layers.dense(Squeeze,
                                 units=output_chn / ratio,
                                 use_bias=False)  #进行压缩,学习注意参数
    Excitation = tf.nn.relu(Excitation)
    Excitation = tf.layers.dense(Excitation, units=output_chn, use_bias=False)
    Excitation = tf.sigmoid(Excitation)
    Excitation = tf.reshape(Excitation, [-1, 1, 1, 1, output_chn])  # produce the scaling shape
    # print("ex:",Excitation.shape)
    # print("input_x:", input_x.shape)
    scale = input_x * Excitation  # rescale the input
    return scale
Example #30
def NCAM_Module(in_dim):
    """ Position attention module"""
    # Ref from SAGAN
    chanel_in = in_dim
    #gamma = tf.Variable(tf.zeros([1]),name='gamma')
    m_batchsize, height, width, C = combined_static_and_dynamic_shape(
        chanel_in)
    globel_avg = global_avg_pool(chanel_in)
    channel_avg_weights = tf.reshape(globel_avg, [1, C, -1])
    globel_max = global_max_pool(chanel_in)
    channel_max_weights = tf.reshape(globel_max, [1, -1, C])
    energy = tf.matmul(channel_avg_weights, channel_max_weights)  # matrix multiplication
    attention = tf.nn.softmax(energy, axis=-1)  # apply the softmax nonlinearity
    proj_value_CAM = tf.reshape(chanel_in, [m_batchsize, C, -1])
    out = tf.matmul(attention, proj_value_CAM)
    out = tf.reshape(out, [m_batchsize, height, width, C])  # reshape back to the input shape
    #  out =gamma * out
    out = PAM_Module(out)
    out = out + chanel_in
    return out
Example #31
def Global_Average_Pooling(x, voxel, stride=1):
    """
    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride) # The stride value does not matter
    It is global average pooling without tflearn
    """
    start = 0
    for i in range(len(voxel)):
        end = start + voxel[i]
        # sum each voxel group along axis 0 and take the average
        x_tmp = np.sum(x[start:end, :, :, :], axis=0) / (end - start)
        if i == 0:
            x_ave = x_tmp
        else:
            x_ave = np.vstack((x_ave, x_tmp))
        start = end

    return global_avg_pool(x_ave, name='Global_avg_pooling')
Example #32
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
net = input_data(shape=[None, 32, 32, 3],
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

filters = [64,128,256,512]
for f in filters:
  net = fractal_conv2d(net, 4, f, 3,
                       normalizer_fn=batch_normalization)
  net = slim.max_pool2d(net,2, 2)

net = fractal_conv2d(net, 4, 512, 2,
                     normalizer_fn=batch_normalization)


net = conv_2d(net, 10, 1)
net = global_avg_pool(net)
net = softmax(net)

net = regression(net, optimizer='adam',
                 loss='categorical_crossentropy',
                 learning_rate=0.002)

# Train using classifier
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=400, shuffle=True, validation_set=(X_test, Y_test),
          show_metric=True, batch_size=32, run_id='cifar10_cnn')
Example #33
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling')