Example #1
    def vgg16(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {
            'c1_1': [3, 3, x_shape[2], 64], 'c1_2': [3, 3, 64, 64],
            'c2_1': [3, 3, 64, 128], 'c2_2': [3, 3, 128, 128],
            'c3_1': [3, 3, 128, 256], 'c3_2': [3, 3, 256, 256],
            'c3_3': [3, 3, 256, 256],
            'c4_1': [3, 3, 256, 512], 'c4_2': [3, 3, 512, 512],
            'c4_3': [3, 3, 512, 512],
            'c5_1': [3, 3, 512, 512], 'c5_2': [3, 3, 512, 512],
            'c5_3': [3, 3, 512, 512]}
        strides = {'c': [1, 1, 1, 1], 'p': [1, 2, 2, 1]}
        pool_win_size = [1, 2, 2, 1]
        conv = x

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(conv, 'Conv_1_1', kernel['c1_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_1_2', kernel['c1_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv, 'Conv_2_1', kernel['c2_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_2_2', kernel['c2_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv, 'Conv_3_1', kernel['c3_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_3_2', kernel['c3_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_3_3', kernel['c3_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv, 'Conv_4_1', kernel['c4_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_4_2', kernel['c4_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_4_3', kernel['c4_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv, 'Conv_5_1', kernel['c5_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_5_2', kernel['c5_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv, 'Conv_5_3', kernel['c5_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_1', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_2', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv, 'output_layer', self.no_of_classes, activation='none', initializer='xavier')
        return conv
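
A minimal usage sketch (the instance name, the input size, and the usual `import tensorflow as tf` are assumptions, not part of the source; any height/width divisible by 32 survives the five 2x2 poolings cleanly):

    x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed input size
    is_training = tf.placeholder(tf.bool)
    logits = net.vgg16(x, is_training)  # 'net' is an instance of the enclosing class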
Example #2
    def alexnet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [11, 11, x_shape[2], 96], 'c2': [5, 5, 96, 256],
                  'c3': [3, 3, 256, 384], 'c4': [3, 3, 384, 384],
                  'c5': [3, 3, 384, 256]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}
        pool_win_size = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x, 'conv_1', kernel['c1'], strides['4'], 'VALID')
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv, 'conv_2', kernel['c2'], strides['1'], padding='SAME', groups=2)
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv, 'conv_3', kernel['c3'], strides['1'], 'SAME')
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv, 'conv_4', kernel['c4'], strides['1'], 'SAME', groups=2)
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv, 'conv_5', kernel['c5'], strides['1'], 'SAME', groups=2)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_1', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_2', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv, 'output_layer', self.no_of_classes, activation='none', initializer='xavier')
        return conv
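
The groups=2 arguments reproduce AlexNet's original two-tower grouped convolutions. A minimal sketch of how such a grouped convolution can be implemented, assuming (the source does not confirm this) that ops.conv2d splits channels internally when groups > 1 and that the filter is laid out as [kh, kw, in_channels / groups, out_channels]:

    import tensorflow as tf

    def grouped_conv2d(x, weights, strides, padding, groups):
        # Split the input channels and the filter's output channels into
        # `groups` independent convolutions, then rejoin along channels.
        x_groups = tf.split(x, groups, axis=3)
        w_groups = tf.split(weights, groups, axis=3)
        outputs = [tf.nn.conv2d(xg, wg, strides, padding)
                   for xg, wg in zip(x_groups, w_groups)]
        return tf.concat(outputs, axis=3)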
Example #3
    def resnet_with_bottleneck(self, x, is_training, layer_from_2=(3, 4, 6, 3), first_kernel=7, first_stride=2, first_pool=True, stride=2):

        x_shape = x.get_shape().as_list()[1:]
        conv = ops.conv2d(x, 'initial_conv', [first_kernel, first_kernel, x_shape[2], 64], [1, first_stride, first_stride, 1])
        if first_pool:
            conv = ops.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1])

        # NOTE: the original passed first_block=True and stride=stride to every block;
        # assuming first_block gates the projection/downsampling path, that would
        # downsample at every single block, so it is restricted here to the first
        # block of each group, giving the standard ResNet-50-style layout.
        for i in range(layer_from_2[0]):
            conv = ops.residual_bottleneck_block(conv, 'Block_1_' + str(i), is_training, 256,
                                                 kernel=3, first_block=(i == 0), stride=stride)

        for i in range(layer_from_2[1]):
            conv = ops.residual_bottleneck_block(conv, 'Block_2_' + str(i), is_training, 512,
                                                 kernel=3, first_block=(i == 0), stride=stride)

        for i in range(layer_from_2[2]):
            conv = ops.residual_bottleneck_block(conv, 'Block_3_' + str(i), is_training, 1024,
                                                 kernel=3, first_block=(i == 0), stride=stride)

        for i in range(layer_from_2[3]):
            conv = ops.residual_bottleneck_block(conv, 'Block_4_' + str(i), is_training, 2048,
                                                 kernel=3, first_block=(i == 0), stride=stride)
        with tf.variable_scope('unit'):
            conv = ops.batch_normalization(conv,is_training)
            conv = tf.nn.relu(conv)
            conv = ops.global_avg_pool(conv)
            conv = ops.flatten(conv)
        with tf.variable_scope('logit'):
            conv = ops.get_hidden_layer(conv, 'output', self.no_of_classes, 'none')
        return conv
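
For reference, ops.residual_bottleneck_block presumably builds the standard bottleneck unit: a 1x1 reduction, a 3x3 convolution, and a 1x1 expansion added to an identity or projection shortcut. A minimal illustrative sketch, with hypothetical helper and scope names that are not this library's API:

    import tensorflow as tf

    def _conv(x, shape, stride, name):
        # bare convolution helper, illustrative only
        w = tf.get_variable(name, shape)
        return tf.nn.conv2d(x, w, [1, stride, stride, 1], 'SAME')

    def bottleneck_sketch(x, name, out_channel, stride=1):
        # 1x1 reduce -> 3x3 -> 1x1 expand, plus identity/projection shortcut
        with tf.variable_scope(name):
            in_ch = x.get_shape().as_list()[-1]
            mid = out_channel // 4
            h = tf.nn.relu(_conv(x, [1, 1, in_ch, mid], stride, 'reduce'))
            h = tf.nn.relu(_conv(h, [3, 3, mid, mid], 1, 'conv3x3'))
            h = _conv(h, [1, 1, mid, out_channel], 1, 'expand')
            shortcut = x
            if stride != 1 or in_ch != out_channel:
                shortcut = _conv(x, [1, 1, in_ch, out_channel], stride, 'proj')
            return tf.nn.relu(h + shortcut)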
Example #4
    def model(self, x, is_training):
        op = ops.get_n_hidden_layers(x,
                                     self.hidden_layer_list,
                                     self.activation_list,
                                     initializer='xavier')
        return ops.get_hidden_layer(op,
                                    'output_layer',
                                    self.no_of_classes,
                                    'none',
                                    initializer='xavier')
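
This generic builder presumably reads its layer widths and activations from parallel lists set elsewhere on the class; illustrative (assumed) values:

    self.hidden_layer_list = [256, 128]      # two hidden layers, assumed example widths
    self.activation_list = ['relu', 'relu']  # one activation per hidden layer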
Example #5
    def lenet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [5, 5, x_shape[2], 20], 'c2': [5, 5, 20, 50]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
        pool_win_size = {'2': [1, 2, 2, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x, 'conv1', kernel['c1'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv, 'conv2', kernel['c2'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_1', 120, initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv, 'Hidden_layer_2', 84, initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv, 'output_layer', self.no_of_classes, activation='none', initializer='xavier')
        return conv
Example #6
def model_fun(x, is_training):
    x_shape = x.get_shape().as_list()[1:]
    kernel = {'c1': [5, 5, x_shape[2], 20], 'c2': [5, 5, 20, 50]}
    strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
    pool_win_size = {'2': [1, 2, 2, 1]}

    conv = ops.conv2d(x, 'conv1', kernel['c1'], strides['1'], 'SAME')

    conv = ops.residual_bottleneck_block(conv, 'ins_block', is_training, 64)

    with tf.variable_scope('Flatten_layer') as scope:
        conv = ops.flatten(conv)
    with tf.variable_scope('Output_layer') as scope:
        conv = ops.get_hidden_layer(conv,
                                    'output_layer',
                                    10,
                                    activation="none",
                                    initializer='xavier')
    return conv
Example #7
    def get_model(self, x, is_training):

        if isinstance(self.cell_size, list):
            rnn_layers = [
                tf.nn.rnn_cell.LSTMCell(self.cell_size[i], name=str(i))
                for i in range(len(self.cell_size))
            ]
            multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
        elif isinstance(self.cell_size, int):
            rnn_layers = [
                tf.nn.rnn_cell.LSTMCell(self.cell_size, name=str(i))
                for i in range(self.no_of_cell)
            ]
            multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
        else:
            raise ValueError('cell_size must be an int or a list of ints')

        outputs, states = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
                                            inputs=x,
                                            dtype=tf.float32)

        op = tf.gather(outputs, int(outputs.get_shape()[1]) - 1, axis=1)
        op = ops.get_n_hidden_layers(op, '', self.hidden_layers,
                                     self.activation_list)
        return ops.get_hidden_layer(op, 'output_layer', 1, 'none')
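
The tf.gather call above simply selects the last time step of the RNN output; assuming outputs has the default dynamic_rnn shape [batch, time, units], plain slicing yields the same tensor:

    op = outputs[:, -1, :]  # equivalent to the tf.gather(..., axis=1) above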
Example #8
    def get_model(self, x, is_training):
        return ops.get_hidden_layer(x, 'output_layer', 1, 'none')
Example #9
    def get_model(self, x, is_training):
        op = ops.get_n_hidden_layers(x, '', self.hidden_layers, self.activation_list)
        return ops.get_hidden_layer(op, 'output_layer', 1, 'none')
Example #10
    def densenet(self, x, is_training, no_of_blocks=3, block_layers=7, first_conv_op_channel=16, block_op_channel=12,
                 kernel_size=3):

        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1], '8': [1, 8, 8, 1]}
        pool_win_size = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1], '8': [1, 8, 8, 1]}
        x_shape = x.get_shape().as_list()[1:]

        kernel = [kernel_size, kernel_size, x_shape[2], first_conv_op_channel]
        # the original call omitted the layer-name argument that every other
        # ops.conv2d call in these examples passes second; 'initial_conv' is assumed
        conv = ops.conv2d(x, 'initial_conv', kernel, strides['1'], 'SAME', initial='xavier', with_bias=False)
        if isinstance(block_layers, int):
            with tf.variable_scope('Dense_Block_1') as scope:
                kernel = [kernel_size, kernel_size, first_conv_op_channel, block_op_channel]
                conv = ops.conv2d_dense_block(conv, 'Dense_Block_1', is_training, kernel, layers=block_layers, dropout_rate=self.dropout_rate)
                op_channel = first_conv_op_channel + block_layers * block_op_channel
            for i in range(1, no_of_blocks):
                with tf.variable_scope('transition_layer_' + str(i)) as scope:
                    conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training,
                                                        updates_collections=None)
                    conv = tf.nn.relu(conv)
                    kernel = [kernel_size, kernel_size, op_channel, op_channel]
                    conv = ops.conv2d(conv, 'transition_layer_' + str(i), kernel, strides=[1, 1, 1, 1], padding='SAME', initial='xavier',
                                      with_bias=False)
                    conv = tf.nn.dropout(conv, self.dropout_rate)
                    conv = ops.avg_pool(conv, pool_win_size['2'], strides['2'], 'VALID')
                # the original reused the name 'Dense_Block_' + str(i) here, which
                # collides with the first block when i == 1; index from i + 1 instead
                with tf.variable_scope('Dense_Block_' + str(i + 1)) as scope:
                    kernel = [kernel_size, kernel_size, op_channel, block_op_channel]
                    conv = ops.conv2d_dense_block(conv, 'Dense_Block_' + str(i + 1), is_training, kernel, layers=block_layers, dropout_rate=self.dropout_rate)
                    op_channel += block_layers * block_op_channel
        elif isinstance(block_layers, list):
            no_of_blocks = len(block_layers)

            with tf.variable_scope('Dense_Block_1') as scope:
                kernel = [kernel_size, kernel_size, first_conv_op_channel, block_op_channel]
                conv = ops.conv2d_dense_block(conv, 'Dense_Block_1', is_training, kernel, layers=block_layers[0], dropout_rate=self.dropout_rate)
                op_channel = first_conv_op_channel + block_layers[0] * block_op_channel

            for i in range(1, no_of_blocks):
                with tf.variable_scope('transition_layer_' + str(i)) as scope:
                    conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training,
                                                        updates_collections=None)
                    conv = tf.nn.relu(conv)
                    kernel = [kernel_size, kernel_size, op_channel, op_channel]
                    conv = ops.conv2d(conv, 'transition_layer_' + str(i), kernel, strides=[1, 1, 1, 1], padding='SAME', initial='xavier',
                                      with_bias=False)
                    conv = tf.nn.dropout(conv, self.dropout_rate)
                    conv = ops.avg_pool(conv, pool_win_size['2'], strides['2'], 'VALID')
                with tf.variable_scope('Dense_Block_' + str(i + 1)) as scope:
                    kernel = [kernel_size, kernel_size, op_channel, block_op_channel]
                    # the original passed 'Dense_Block_' + str(i) here, mismatching the enclosing scope
                    conv = ops.conv2d_dense_block(conv, 'Dense_Block_' + str(i + 1), is_training, kernel, layers=block_layers[i], dropout_rate=self.dropout_rate)
                    op_channel += block_layers[i] * block_op_channel
        with tf.variable_scope('Global_Average_Pooling') as scope:
            conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training, updates_collections=None)
            conv = tf.nn.relu(conv)
            conv = ops.avg_pool(conv, pool_win_size['8'], strides['8'], 'VALID')

        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)

        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv, 'output_layer', self.no_of_classes, activation='none', initializer='xavier')

        return conv
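
With the defaults the channel bookkeeping is easy to verify by hand: each dense block adds block_layers * block_op_channel = 7 * 12 = 84 channels, so op_channel grows 16 → 100 → 184 → 268 across the three blocks, while the transition layers keep the channel count fixed (their conv maps op_channel to op_channel) and only halve the spatial size.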
Example #11
    def inception_v2(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        conv = ops.conv2d(x, 'conv1', kernel_size=[7, 7, x_shape[2], 64], strides=[1, 2, 2, 1])
        conv = tf.nn.relu(conv)
        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])
        conv = tf.nn.local_response_normalization(conv, depth_radius=2, alpha=2e-05, beta=0.75)

        conv = ops.conv2d(conv, 'conv2', kernel_size=[1, 1, 64, 64], strides=[1, 1, 1, 1], padding='VALID')
        conv = tf.nn.relu(conv)

        conv_shape = conv.get_shape().as_list()[1:]
        conv = ops.conv2d(conv, 'conv3', kernel_size=[3, 3, conv_shape[2], 192], strides=[1, 1, 1, 1])
        conv = tf.nn.relu(conv)

        conv = tf.nn.local_response_normalization(conv, depth_radius=2, alpha=2e-05, beta=0.75)
        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv, 'Block_1', is_training, out_channel={'1': 64, '3': 128, '5': 32},
                                      reduced_out_channel={'3': 96, '5': 16, 'p': 32})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv, 'Block_2', is_training, out_channel={'1': 128, '3': 192, '5': 96},
                                      reduced_out_channel={'3': 128, '5': 32, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv, 'Block_3', is_training, out_channel={'1': 192, '3': 208, '5': 48},
                                      reduced_out_channel={'3': 96, '5': 16, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv, 'Block_4', is_training, out_channel={'1': 160, '3': 224, '5': 64},
                                      reduced_out_channel={'3': 112, '5': 24, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv, 'Block_5', is_training, out_channel={'1': 128, '3': 256, '5': 64},
                                      reduced_out_channel={'3': 128, '5': 24, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        # '3': 288 matches the GoogLeNet (4d) table; the original's 228 looks like a digit transposition
        conv = ops.inception_v2_block(conv, 'Block_6', is_training, out_channel={'1': 112, '3': 288, '5': 64},
                                      reduced_out_channel={'3': 144, '5': 32, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv, 'Block_7', is_training, out_channel={'1': 256, '3': 320, '5': 128},
                                      reduced_out_channel={'3': 160, '5': 32, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv, 'Block_8', is_training, out_channel={'1': 256, '3': 320, '5': 128},
                                      reduced_out_channel={'3': 160, '5': 32, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv, 'Block_9', is_training, out_channel={'1': 384, '3': 384, '5': 128},
                                      reduced_out_channel={'3': 192, '5': 48, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.global_avg_pool(conv)
        conv = ops.flatten(conv)

        conv = tf.nn.dropout(conv, 0.4)  # NB: in TF1 the second argument is keep_prob, not the drop rate
        conv = ops.get_hidden_layer(conv, 'output_layer', 1000, 'none', 'xavier')
        return conv
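
For reference, an inception block of the kind ops.inception_v2_block presumably builds runs four parallel branches and concatenates them on the channel axis: out_channel sizes the 1x1, 3x3, and 5x5 branches, and reduced_out_channel sizes the 1x1 reductions plus the pool projection. A minimal illustrative sketch (helper names are hypothetical, not this library's API):

    import tensorflow as tf

    def _conv(x, shape, name):
        # bare 'SAME' convolution + relu, illustrative only
        w = tf.get_variable(name, shape)
        return tf.nn.relu(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME'))

    def inception_block_sketch(x, out_channel, reduced_out_channel):
        in_ch = x.get_shape().as_list()[-1]
        b1 = _conv(x, [1, 1, in_ch, out_channel['1']], 'b1')                      # 1x1 branch
        b3 = _conv(x, [1, 1, in_ch, reduced_out_channel['3']], 'b3_reduce')       # 1x1 reduce
        b3 = _conv(b3, [3, 3, reduced_out_channel['3'], out_channel['3']], 'b3')  # 3x3
        b5 = _conv(x, [1, 1, in_ch, reduced_out_channel['5']], 'b5_reduce')       # 1x1 reduce
        b5 = _conv(b5, [5, 5, reduced_out_channel['5'], out_channel['5']], 'b5')  # 5x5
        bp = tf.nn.max_pool(x, [1, 3, 3, 1], [1, 1, 1, 1], 'SAME')                # 3x3 pool
        bp = _conv(bp, [1, 1, in_ch, reduced_out_channel['p']], 'bp')             # 1x1 projection
        return tf.concat([b1, b3, b5, bp], axis=3)                                # channel concat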