Code example #1
File: Vgg16_slim.py  Project: gideonmanurung/poda
def build_top_layer_model(base_layer, num_depthwise_layer=None,
                          num_fully_connected_layer=1, num_hidden_unit=512,
                          activation_fully_connected='relu', dropout_keep_prob=None,
                          regularizers=None, num_classes=1000):
    previous_layer = base_layer
    if num_depthwise_layer is not None:
        # Each requested depthwise layer expands into three stacked convolutions.
        num_depthwise_layer = num_depthwise_layer * 3
        for i in range(num_depthwise_layer):
            depth_wise_net = depthwise_convolution_2d(input_tensor=previous_layer, number_filters=1,
                                                      kernel_sizes=(3, 3), stride_sizes=(2, 2), paddings='same',
                                                      activations='relu', names=str(i))
            previous_layer = depth_wise_net

    flatten_layer = flatten(input_tensor=previous_layer)

    if num_fully_connected_layer is not None:
        for j in range(num_fully_connected_layer):
            fully_connected_net = dense(input_tensor=flatten_layer, hidden_units=num_hidden_unit,
                                        activations=activation_fully_connected, regularizers=regularizers,
                                        scale=dropout_keep_prob, names=str(j))
            flatten_layer = fully_connected_net

    non_logit = dense(input_tensor=flatten_layer, hidden_units=num_classes, activations=None)
    if num_classes > 2:
        output = softmax(input_tensor=non_logit, names='output')
    else:
        output = sigmoid(input_tensor=non_logit, names='output')
    return non_logit, output
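
A minimal usage sketch for this helper, assuming the layer ops above come from poda's layer module and that a VGG16 base has already produced a feature map; the placeholder shape below is an assumption, not part of the project:

import tensorflow as tf

# Hypothetical base feature map; the 7x7x512 shape is an assumption.
base_features = tf.compat.v1.placeholder(tf.float32, [None, 7, 7, 512])

# Ten classes, so the softmax branch is taken; the two requested
# depthwise layers expand to six inside the function.
logits, probabilities = build_top_layer_model(base_features,
                                              num_depthwise_layer=2,
                                              num_fully_connected_layer=1,
                                              num_hidden_unit=512,
                                              dropout_keep_prob=0.8,
                                              num_classes=10)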
Code example #2
    def create_model(self):
        number_filter = self.input_tensor.get_shape().as_list()[-1]

        vgg_base = self.vgg_block(input_tensor=self.input_tensor, num_block=self.num_block, batch_normalization=self.batch_normalization)

        base_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
        
        with tf.compat.v1.variable_scope(self.scope, 'vgg_16', [vgg_base]):
            if self.num_depthwise_layer is not None and self.num_depthwise_layer > 0:
                for j in range(self.num_depthwise_layer):
                    # Stack depthwise convolutions on top of the VGG base.
                    vgg_base = depthwise_convolution_2d(input_tensor=vgg_base, number_filters=number_filter,
                                                        kernel_sizes=(3, 3), stride_sizes=(1, 1), paddings='same',
                                                        activations='relu', dropout_rates=None, names=None)
            else:
                flatten_layer = flatten(input_tensor=vgg_base, names='flatten')
                for i in range(self.num_dense_layer):
                    # Feed each dense layer the previous one's output so they stack.
                    flatten_layer = dense(input_tensor=flatten_layer, hidden_units=self.num_hidden_unit,
                                          activations=self.activation_dense, regularizers=self.regularizer,
                                          scale=self.dropout_rate)
                vgg_base = flatten_layer

            
            last_layer = flatten(input_tensor=vgg_base, names='flatten')

            full_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)

            non_logit = dense(input_tensor=last_layer, hidden_units=self.classes, names='output')

            if self.classes > 2:
                output = softmax(input_tensor=non_logit, names='output')
            else:
                output = sigmoid(input_tensor=non_logit, names='output')

        return non_logit, output, base_var_list, full_var_list
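
The two variable lists are returned so the pretrained VGG base can be restored separately from the newly added layers. A minimal sketch of that split, assuming model is an instance of this class and using a placeholder checkpoint path:

import tensorflow as tf

non_logit, output, base_var_list, full_var_list = model.create_model()

# Restore only the convolutional base from a pretrained checkpoint;
# '/tmp/vgg_16.ckpt' is a placeholder path, not shipped with poda.
saver = tf.compat.v1.train.Saver(var_list=base_var_list)
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    saver.restore(sess, '/tmp/vgg_16.ckpt')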
Code example #3
    def create_model(self):
        """[summary]
        
        Arguments:
            input_tensor {[type]} -- [description]
        
        Keyword Arguments:
            classes {int} -- [description] (default: {1000})
            n_inception_a {int} -- [description] (default: {4})
            n_inception_b {int} -- [description] (default: {7})
            n_inception_c {int} -- [description] (default: {3})
            batch_normalizations {bool} -- [description] (default: {True})
        """
        with tf.compat.v1.variable_scope(self.scope, 'inception_v4_resnet_v2', [self.input_tensor]):
            stem_layer = self.stem_block(input_tensor=self.input_tensor, batch_normalization=self.batch_normalizations)

            # Chain each block family so repeated blocks stack; the running
            # variable also stays defined when a block count is zero.
            inception_a_layer = stem_layer
            for i in range(self.n_inception_a):
                inception_a_layer = self.inception_a(input_tensor=inception_a_layer, batch_normalization=self.batch_normalizations)

            reduction_a_layer = self.reduction_a(input_tensor=inception_a_layer, batch_normalization=self.batch_normalizations)

            inception_b_layer = reduction_a_layer
            for j in range(self.n_inception_b):
                inception_b_layer = self.inception_b(input_tensor=inception_b_layer, batch_normalization=self.batch_normalizations)

            reduction_b_layer = self.reduction_b(input_tensor=inception_b_layer, batch_normalization=self.batch_normalizations)

            inception_c_layer = reduction_b_layer
            for k in range(self.n_inception_c):
                inception_c_layer = self.inception_c(input_tensor=inception_c_layer, batch_normalization=self.batch_normalizations)

            if self.n_inception_a == 0:
                inception_v4 = stem_layer
            elif self.n_inception_b == 0:
                inception_v4 = reduction_a_layer
            elif self.n_inception_c == 0:
                inception_v4 = reduction_b_layer
            else:
                inception_v4 = inception_c_layer

            base_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)

            inception_v4 = avarage_pool_2d(input_tensor=inception_v4, kernel_sizes=(3,3), paddings='SAME', stride_sizes=(1,1), names='output')

            inception_v4 = dropout(input_tensor=inception_v4, names='output', dropout_rates=0.2)

            inception_v4 = flatten(input_tensor=inception_v4, names='output')

            full_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)

            non_logit = dense(input_tensor=inception_v4, hidden_units=self.classes, names='output')
            
            if self.classes > 2:
                output = softmax(input_tensor=non_logit, names='output')
            else:
                output = sigmoid(input_tensor=non_logit, names='output')

        return non_logit, output, base_var_list, full_var_list
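
A minimal inference sketch for this graph, assuming model is an instance of this class, model.input_tensor is a feed placeholder, and images is a NumPy batch matching its shape (all assumptions):

import tensorflow as tf

non_logit, output, base_var_list, full_var_list = model.create_model()

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # images is a hypothetical batch matching model.input_tensor's shape.
    probabilities = sess.run(output, feed_dict={model.input_tensor: images})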
Code example #4
def build_top_layer_model(base_layer,
                          num_depthwise_layer=None,
                          dropout_keep_prob=None,
                          regularizers=None,
                          num_classes=1000):
    previous_layer = base_layer
    if num_depthwise_layer is not None:
        for i in range(num_depthwise_layer):
            depth_wise_net = depthwise_convolution_2d(
                input_tensor=previous_layer,
                number_filters=1,
                kernel_sizes=(3, 3),
                stride_sizes=(2, 2),
                paddings='same',
                activations='relu',
                names=str(i))
            previous_layer = depth_wise_net
    else:
        depth_wise_net = previous_layer

    # Flatten the feature map, then apply dropout before the classifier.
    flatten_layer = flatten(input_tensor=depth_wise_net)

    flatten_layer = dropout(input_tensor=flatten_layer,
                            names='output',
                            dropout_rates=dropout_keep_prob)

    full_var_list = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)

    non_logit = dense(input_tensor=flatten_layer,
                      hidden_units=num_classes,
                      activations=None)
    if num_classes > 2:
        output = softmax(input_tensor=non_logit, names='output')
    else:
        output = sigmoid(input_tensor=non_logit, names='output')
    return non_logit, output, full_var_list
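
A minimal usage sketch for this variant (the base feature map's shape is an assumption); with num_classes=2 the sigmoid branch is taken:

import tensorflow as tf

# Hypothetical base feature map; the 7x7x512 shape is an assumption.
base_features = tf.compat.v1.placeholder(tf.float32, [None, 7, 7, 512])

logits, probabilities, var_list = build_top_layer_model(base_features,
                                                        num_depthwise_layer=1,
                                                        dropout_keep_prob=0.5,
                                                        num_classes=2)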