Example #1
def build_model(inputs, is_training):
    # Run the Inception-ResNet-v1 backbone to get the 128-d bottleneck.
    with slim.arg_scope(inception_resnet_v1_arg_scope(weight_decay=1e-5)):
        logits, end_points = inception_resnet_v1(inputs,
                                                 bottleneck_layer_size=128,
                                                 is_training=is_training)

    # Scopes to exclude from checkpoint restoration: the new heads and
    # the bottleneck are trained from scratch.
    exclude = [
        'Added', 'Race', 'Gender', 'InceptionResnetV1/Logits/Bottleneck'
    ]
    variables_to_restore = slim.get_variables_to_restore(exclude=exclude)

    # Attach the gender and race heads on top of the bottleneck output.
    logits_gender, logits_race, end_points = add_layers(
        logits, is_training=is_training)

    # Gradient-scaling trick: the forward value is unchanged, but only
    # 1/100 of the gradient flows back through this path; tf.stop_gradient
    # blocks the remaining 99/100. This effectively gives the earlier
    # layers a 1/100 learning rate.
    logits_gender = 1 / 100 * logits_gender + (
        1 - 1 / 100) * tf.stop_gradient(logits_gender)
    logits_race = 1 / 100 * logits_race + (
        1 - 1 / 100) * tf.stop_gradient(logits_race)
    return logits_gender, logits_race, end_points, variables_to_restore
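
A minimal sketch of how the returned variables_to_restore is typically consumed; the session setup, image batch, and checkpoint path below are assumptions, not part of the original example:

logits_gender, logits_race, end_points, variables_to_restore = build_model(
    images, is_training=True)

# Hypothetical restore step: load the pretrained backbone weights while
# the excluded head variables keep their fresh initialization.
restore_saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    restore_saver.restore(sess, 'path/to/facenet_pretrained.ckpt')  # assumed path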
Example #2
def Perceptual_Net(input_imgs):
    # input_imgs: [batch_size, H, W, C], values in 0-255, BGR images

    input_imgs = tf.reshape(input_imgs, [-1, 224, 224, 3])
    input_imgs = tf.cast(input_imgs, tf.float32)
    input_imgs = tf.clip_by_value(input_imgs, 0, 255)
    # Map pixel values to roughly [-1, 1], matching FaceNet preprocessing.
    input_imgs = (input_imgs - 127.5) / 128.0

    # Standard FaceNet backbone (Inception-ResNet-v1), run in inference mode.
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None
    }

    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=slim.initializers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(0.0),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        feature_128, _ = inception_resnet_v1(input_imgs,
                                             bottleneck_layer_size=128,
                                             is_training=False,
                                             reuse=tf.AUTO_REUSE)

    # Return the last FC-layer feature (before classification) as the
    # identity feature.
    return feature_128
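
A perceptual loss built on this network typically compares the identity features of two images. A minimal sketch under assumed tensors rendered_imgs and gt_imgs (both [B, 224, 224, 3] BGR, 0-255):

import tensorflow as tf

# Hypothetical identity loss: 1 - cosine similarity between embeddings.
feat_a = tf.nn.l2_normalize(Perceptual_Net(rendered_imgs), 1)
feat_b = tf.nn.l2_normalize(Perceptual_Net(gt_imgs), 1)
id_loss = tf.reduce_mean(1.0 - tf.reduce_sum(feat_a * feat_b, axis=1))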
Example #3
    def _buildGraph(self):
        # Flattened image input; reshaped to NHWC below.
        x_in = tf.placeholder(
            tf.float32,
            shape=[
                None,  # enables variable batch size
                self.input_dim[0]
            ],
            name="x")
        x_in_reshape = tf.reshape(
            x_in, [-1, self.input_dim[1], self.input_dim[2], 3])

        # Keep probability; defaults to 1.0 (no dropout) at test time.
        dropout = tf.placeholder_with_default(1., shape=[], name="dropout")

        # Integer class labels, converted to one-hot vectors of depth 2.
        y_in = tf.placeholder(dtype=tf.int8, name="y")
        onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=2)

        is_train = tf.placeholder_with_default(True, shape=[], name="is_train")

        logits, nett, ww = inception_resnet_v1.inception_resnet_v1(
            x_in_reshape,
            num_classes=2,
            is_training=is_train,
            dropout_keep_prob=dropout,
            reuse=None,
            scope='InceptionRes1')

        pred = tf.nn.softmax(logits, name="prediction")

        global_step = tf.Variable(0, trainable=False)

        pred_cost = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)

        tf.summary.scalar("InceptionR1_cost", pred_cost)

        train_op = tf.contrib.layers.optimize_loss(
            loss=pred_cost,
            learning_rate=self.learning_rate,
            global_step=global_step,
            optimizer="Adam")

        merged_summary = tf.summary.merge_all()

        return (x_in, dropout, is_train, y_in, logits, nett, ww, pred,
                pred_cost, global_step, train_op, merged_summary)
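
A minimal sketch of one training step against this graph, written as if inside the same class; sess, batch_x (flattened images), and batch_y (integer labels) are assumed:

(x_in, dropout, is_train, y_in, logits, nett, ww, pred,
 pred_cost, global_step, train_op, merged_summary) = self._buildGraph()

# Hypothetical training step: run the optimizer, loss, and summaries.
_, cost, summary = sess.run(
    [train_op, pred_cost, merged_summary],
    feed_dict={x_in: batch_x, y_in: batch_y,
               dropout: 0.8, is_train: True})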
Example #4
    def model_confirm(self, choosed_model):
        # Map each supported model name to its constructor; unknown names
        # return the -1 sentinel.
        builders = {
            'AlexNet': alexnet,
            'VGG16': vgg16,
            'VGG19': vgg19,
            'InceptionV3': inception_v3,
            'InceptionV4': inception_v4,
            'InceptionV4_ResNetV1': inception_resnet_v1,
            'InceptionV4_ResNetV2': inception_resnet_v2,
            'ResNet18': ResnetBuilder.build_resnet_18,
            'ResNet34': ResnetBuilder.build_resnet_34,
            'ResNet50': ResnetBuilder.build_resnet_50,
            'ResNet101': ResnetBuilder.build_resnet_101,
            'ResNet152': ResnetBuilder.build_resnet_152,
            'DenseNet121': densenet121,
            'DenseNet169': densenet169,
            'DenseNet201': densenet201,
            'DenseNet264': densenet264,
        }
        builder = builders.get(choosed_model)
        return builder(self.config) if builder is not None else -1
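
A hypothetical call site; the model name and error handling are assumptions:

# model_confirm returns -1 when the name is unknown, so check before use.
model = self.model_confirm('ResNet50')
if model == -1:
    raise ValueError('unsupported model name')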
Example #5
    def _buildGraph(self, model):
        # image input
        x_in = tf.placeholder(tf.float32, name="x")
        x_in_reshape = tf.reshape(
            x_in, [-1, self.input_dim[1], self.input_dim[2], 3])
        # dropout
        dropout = tf.placeholder_with_default(1., shape=[], name="dropout")
        # label input
        y_in = tf.placeholder(dtype=tf.float32, name="y")
        # train or test
        is_train = tf.placeholder_with_default(True, shape=[], name="is_train")

        if model == 'IG':
            import GoogleNet
            logits, nett, ww = GoogleNet.googlenet(x_in_reshape,
                                                   num_classes=4,
                                                   is_training=is_train,
                                                   dropout_keep_prob=dropout,
                                                   scope='GoogleNet')
            print('Using Inception-V1')
        elif model == 'I2':
            import inception_v2
            logits, nett, ww = inception_v2.inception_v2(
                x_in_reshape,
                num_classes=4,
                is_training=is_train,
                dropout_keep_prob=dropout,
                min_depth=16,
                depth_multiplier=1.0,
                prediction_fn=slim.softmax,
                spatial_squeeze=True,
                reuse=None,
                scope='InceptionV2',
                global_pool=False)
            print('Using Inception-V2')
        elif model == 'I3':
            import inception_v3
            logits, nett, ww = inception_v3.inception_v3(
                x_in_reshape,
                num_classes=4,
                is_training=is_train,
                dropout_keep_prob=dropout,
                min_depth=16,
                depth_multiplier=1.0,
                prediction_fn=slim.softmax,
                spatial_squeeze=True,
                reuse=None,
                create_aux_logits=True,
                scope='InceptionV3',
                global_pool=False)
            print('Using Inception-V3')
        elif model == 'I4':
            import inception_v4
            logits, nett, ww = inception_v4.inception_v4(
                x_in_reshape,
                num_classes=4,
                is_training=is_train,
                dropout_keep_prob=dropout,
                reuse=None,
                create_aux_logits=True,
                scope='InceptionV4')
            print('Using Inception-V4')
        elif model == 'IR1':
            import inception_resnet_v1
            logits, nett, ww = inception_resnet_v1.inception_resnet_v1(
                x_in_reshape,
                num_classes=4,
                is_training=is_train,
                dropout_keep_prob=dropout,
                reuse=None,
                scope='InceptionRes1')
            print('Using Inception-Resnet-V1')
        elif model == 'IR2':
            import inception_resnet_v2
            logits, nett, ww = inception_resnet_v2.inception_resnet_v2(
                x_in_reshape,
                num_classes=4,
                is_training=is_train,
                dropout_keep_prob=dropout,
                reuse=None,
                create_aux_logits=True,
                scope='InceptionRes2')
            print('Using Inception-Resnet-V2')
        else:
            import GoogleNet
            logits, nett, ww = GoogleNet.googlenet(x_in_reshape,
                                                   num_classes=4,
                                                   is_training=is_train,
                                                   dropout_keep_prob=dropout,
                                                   scope='GoogleNet')
            print('Using Default: Inception-V1')

        pred = tf.nn.softmax(logits, name="prediction")

        global_step = tf.Variable(0, trainable=False)

        pred_cost = tf.losses.softmax_cross_entropy(onehot_labels=y_in,
                                                    logits=logits)

        tf.summary.scalar("{}_cost".format(model), pred_cost)

        tf.summary.tensor_summary("{}_pred".format(model), pred)

        train_op = tf.contrib.layers.optimize_loss(
            loss=pred_cost,
            learning_rate=self.learning_rate,
            global_step=global_step,
            optimizer="Adam")

        merged_summary = tf.summary.merge_all()

        return (x_in, dropout, is_train, y_in, logits, nett, ww, pred,
                pred_cost, global_step, train_op, merged_summary)
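
Unlike Example #3, this graph expects labels to arrive already one-hot encoded as floats (y_in feeds softmax_cross_entropy directly). A minimal feed sketch, with sess, batch_x, and integer_labels assumed:

import numpy as np

# Hypothetical feed: depth-4 one-hot float labels.
batch_y = np.eye(4, dtype=np.float32)[integer_labels]
_, cost = sess.run([train_op, pred_cost],
                   feed_dict={x_in: batch_x, y_in: batch_y,
                              dropout: 0.8, is_train: True})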
Example #6
    def model_init(self, para_dict):
        #----var parsing
        model_shape = para_dict['model_shape']  # [N, H, W, C]
        infer_method = para_dict['infer_method']
        loss_method = para_dict['loss_method']
        opti_method = para_dict['opti_method']
        learning_rate = para_dict['learning_rate']
        save_dir = para_dict['save_dir']
        embed_length = para_dict['embed_length']

        #----tf_placeholder declaration
        tf_input = tf.placeholder(shape=model_shape,
                                  dtype=tf.float32,
                                  name='input')
        tf_keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
        tf_label_batch = tf.placeholder(shape=[None],
                                        dtype=tf.int32,
                                        name="label_batch")
        tf_phase_train = tf.placeholder(dtype=tf.bool, name="phase_train")

        #---inference selection
        if infer_method == "simple_resnet":
            prelogits = self.simple_resnet(tf_input, tf_keep_prob,
                                           self.class_num)
            embeddings = tf.nn.l2_normalize(prelogits,
                                            1,
                                            1e-10,
                                            name='embeddings')
        elif infer_method == "inception_resnet_v1":
            prelogits, _ = inception_resnet_v1(
                tf_input,
                tf_keep_prob,
                phase_train=tf_phase_train,
                bottleneck_layer_size=embed_length,
                weight_decay=0.0,
                reuse=None)
            prelogits = tf.identity(prelogits, name='prelogits')
            embeddings = tf.nn.l2_normalize(prelogits,
                                            1,
                                            1e-10,
                                            name='embeddings')
        elif infer_method == "inception_resnet_v2":
            prelogits = inception_resnet_v2(tf_input,
                                            tf_keep_prob,
                                            phase_train=tf_phase_train,
                                            bottleneck_layer_size=embed_length,
                                            weight_decay=0.0,
                                            reuse=None)
            embeddings = tf.nn.l2_normalize(prelogits,
                                            1,
                                            1e-10,
                                            name='embeddings')

        #---loss selection
        if loss_method == "cross_entropy":
            output = tf.layers.dense(inputs=prelogits,
                                     units=self.class_num,
                                     activation=None,
                                     name="output")
            prediction = tf.nn.softmax(output, name="prediction")
            loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf_label_batch, logits=output),
                name="loss")
        elif loss_method == "arc_loss":
            # Combined-margin hyperparameters (config keys logits_margin1/2/3, logits_scale).
            m1 = 1.0  # sphereface margin, should be >= 1
            m2 = 0.5  # cosineface margin, should be >= 0
            m3 = 0.0  # arcface margin, should be >= 0
            s = 64.0  # logit scale

            norm_dense = NormDense(embed_length,
                                   self.class_num,
                                   output_name='prelogit')
            prelogit = norm_dense(embeddings)

            logit_cos = self.arcloss(embeddings, prelogit, tf_label_batch, m1,
                                     m2, m3, s)
            prediction = tf.nn.softmax(logit_cos, name='prediction')
            loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf_label_batch, logits=logit_cos),
                name='loss')

        #----optimizer selection
        if opti_method == "adam":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate).minimize(loss)
        elif opti_method == 'adagrad':
            optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)

        # ----face detection init
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(
            'src/models/shape_predictor_68_face_landmarks.dat')

        #----create the dir to save model weights(CKPT, PB)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        out_dir_prefix = os.path.join(save_dir, "model")
        saver = tf.train.Saver(max_to_keep=5)

        #----appoint PB node names
        pb_save_path = os.path.join(save_dir, "pb_model.pb")
        pb_save_list = ['prelogits', 'prediction', "embeddings"]

        #----create the log(JSON)
        for count in range(100):
            log_path = os.path.join(save_dir,
                                    "train_result_" + str(count) + ".json")
            if not os.path.exists(log_path):
                break
        print("log_path: ", log_path)
        self.content = self.log_update(self.content, para_dict)

        #----local var to global
        self.tf_input = tf_input
        self.tf_keep_prob = tf_keep_prob
        self.tf_label_batch = tf_label_batch
        self.tf_phase_train = tf_phase_train
        self.embeddings = embeddings
        self.optimizer = optimizer
        self.prediction = prediction
        self.out_dir_prefix = out_dir_prefix
        self.saver = saver
        self.pb_save_path = pb_save_path
        self.pb_save_list = pb_save_list
        self.log_path = log_path
        self.save_dir = save_dir
        self.model_shape = model_shape
        self.loss = loss
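
The m1/m2/m3/s values in the arc_loss branch look like InsightFace-style combined-margin hyperparameters. A worked sketch of that formula follows; how this repo's arcloss() (not shown) maps m1/m2/m3 onto it is an assumption, and the inline comments above label m2/m3 the other way around:

import numpy as np

def combined_margin_logit(theta, m1=1.0, m2=0.5, m3=0.0, s=64.0):
    # InsightFace combined margin (assumed convention): the ground-truth
    # class logit becomes s * (cos(m1*theta + m2) - m3), where theta is
    # the angle between the embedding and the class weight vector.
    # m1 > 1 gives SphereFace, m2 > 0 ArcFace, m3 > 0 CosineFace.
    return s * (np.cos(m1 * theta + m2) - m3)

# With the defaults above this reduces to ArcFace: s * cos(theta + 0.5).
print(combined_margin_logit(np.pi / 4))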
Example #7
    def model_init(self, para_dict):
        #----var parsing
        model_shape = para_dict['model_shape']  # [N, H, W, C]
        infer_method = para_dict['infer_method']
        loss_method = para_dict['loss_method']
        opti_method = para_dict['opti_method']
        learning_rate = para_dict['learning_rate']
        save_dir = para_dict['save_dir']
        embed_length = para_dict['embed_length']

        #----tf_placeholder declaration
        tf_input = tf.placeholder(tf.float32, shape=model_shape, name='input')
        tf_keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
        tf_label_batch = tf.placeholder(dtype=tf.int32, shape=[None],
                                        name="label_batch")
        tf_phase_train = tf.placeholder(tf.bool, name="phase_train")

        #---inference selection
        if infer_method == "simple_resnet":
            # Derive prelogits and L2-normalized embeddings, matching the
            # inception_resnet_v1 branch, so the loss and attribute
            # assignments below are defined for this branch too.
            prelogits = self.simple_resnet(tf_input, tf_keep_prob,
                                           self.class_num)
            embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10,
                                            name='embeddings')
        elif infer_method == "inception_resnet_v1":
            prelogits, _ = inception_resnet_v1(
                tf_input,
                tf_keep_prob,
                phase_train=tf_phase_train,
                bottleneck_layer_size=embed_length,
                weight_decay=0.0,
                reuse=None)
            embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10,
                                            name='embeddings')

        #---loss selection
        if loss_method == "cross_entropy":
            output = tf.layers.dense(inputs=prelogits,
                                     units=self.class_num,
                                     activation=None,
                                     name="output")
            prediction = tf.nn.softmax(output, name="prediction")
            loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf_label_batch, logits=output),
                name="loss")

        #----optimizer selection
        if opti_method == "adam":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate).minimize(loss)

        #----create the dir to save model weights(CKPT, PB)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        out_dir_prefix = os.path.join(save_dir, "model")
        saver = tf.train.Saver(max_to_keep=5)

        #----appoint PB node names
        pb_save_path = os.path.join(save_dir, "pb_model.pb")
        pb_save_list = ['prediction', 'embeddings']

        #----create the log(JSON)
        for count in range(100):
            log_path = os.path.join(save_dir,
                                    "train_result_" + str(count) + ".json")
            if not os.path.exists(log_path):
                break
        print("log_path: ", log_path)
        self.content = self.log_update(self.content, para_dict)

        #----local var to global
        self.tf_input = tf_input
        self.tf_keep_prob = tf_keep_prob
        self.tf_label_batch = tf_label_batch
        self.tf_phase_train = tf_phase_train
        self.embeddings = embeddings
        self.optimizer = optimizer
        self.prediction = prediction
        self.out_dir_prefix = out_dir_prefix
        self.saver = saver
        self.pb_save_path = pb_save_path
        self.pb_save_list = pb_save_list
        self.log_path = log_path
        self.save_dir = save_dir
        self.model_shape = model_shape
        self.loss = loss
Example #8
import re
import pickle as pkl

import inception_resnet_v1 as inception

# weights.pkl maps TF-slim variable names (e.g. ".../Conv2d_1a_3x3/weights:0")
# to numpy arrays exported from a TensorFlow checkpoint.
with open("weights.pkl", "rb") as f:
    weights = pkl.load(f)

model = inception.inception_resnet_v1((160, 160, 3))

for lay in model.layers:
    if re.search("Conv2d", lay.name):
        if not re.search("BatchNorm", lay.name):
            # Convolutional layer: kernel plus an optional bias.
            if lay.name + "/biases:0" in weights:
                lay.set_weights([
                    weights[lay.name + "/weights:0"],
                    weights[lay.name + "/biases:0"]
                ])
            else:
                lay.set_weights([weights[lay.name + "/weights:0"]])
        else:
            # BatchNorm layer without a gamma/scale parameter: beta,
            # moving mean, and moving variance, in the order Keras
            # expects for scale=False layers.
            beta = weights[lay.name + "/beta:0"]
            moving_mean = weights[lay.name + "/moving_mean:0"]
            moving_var = weights[lay.name + "/moving_variance:0"]
            lay.set_weights([beta, moving_mean, moving_var])

model.save_weights("InceptionResnetV1_weights.h5")
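
A quick sanity check for the exported weights; the round trip below assumes the builder always produces an architecturally identical model:

import numpy as np

# Hypothetical round-trip check: a fresh model should load the exported
# H5 file without shape mismatches and produce identical outputs.
model2 = inception.inception_resnet_v1((160, 160, 3))
model2.load_weights("InceptionResnetV1_weights.h5")

dummy = np.random.rand(1, 160, 160, 3).astype("float32")
assert np.allclose(model.predict(dummy), model2.predict(dummy))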