Beispiel #1
0
def test_each_subject(
        w, b, sbjt_start_idx
):  # In case when test the model with the whole rest frames
    """Evaluate the model on one held-out subject's full test set.

    Args:
        w: last-layer weight array injected into the feature model.
        b: last-layer bias array injected into the feature model.
        sbjt_start_idx: index into the sorted list of test-subject files.

    Returns:
        (y_hat, y_lab): predicted intensities and ground-truth labels for
        the selected subject.
    """
    batch_size = 10
    three_layers = feature_layer(batch_size, FLAGS.num_au)
    print("!!!!!!!!!!!!!!!!!!")
    print(w.shape)
    print("!!!!!!!!!!!!!!!!!!")

    three_layers.loadWeight(FLAGS.vae_model,
                            FLAGS.au_idx,
                            num_au_for_rm=FLAGS.num_au,
                            w=w,
                            b=b)

    test_subjects = os.listdir(FLAGS.testset_dir)
    test_subjects.sort()

    test_subject = test_subjects[sbjt_start_idx]

    print("====================> subject: ", test_subject)
    # Fix: use a context manager so the pickle file handle is closed (the
    # original leaked it), and os.path.join instead of raw string concat so a
    # testset_dir without a trailing slash still works.
    with open(os.path.join(FLAGS.testset_dir, test_subject), "rb") as f:
        data = pickle.load(f, encoding='latin1')
    test_features = data['test_features']
    y_hat = three_layers.model_intensity.predict(test_features)
    print(y_hat)
    if FLAGS.au_idx < 8:
        # Select one AU column and insert a singleton middle axis.
        # NOTE(review): assumes data['lab'] is 3-D so the slice stays 2-D —
        # confirm against the pickle writer.
        lab = data['lab'][:, FLAGS.au_idx]
        y_lab = np.reshape(lab, (lab.shape[0], 1, lab.shape[1]))
    else:
        y_lab = data['lab']
    return y_hat, y_lab
Beispiel #2
0
    def _load_weight_s(sbjt_start_idx):
        """Load the last-layer (w, b) of model S per AU and stack them.

        Args:
            sbjt_start_idx: subject index used to pick the per-subject
                checkpoint on the 's4' path.

        Returns:
            (w_arr, b_arr): per-AU weights stacked with hstack/vstack, or
            (None, None) on the 's1' path where one joint model is loaded.
            NOTE(review): the 's1' branch previously crashed with
            UnboundLocalError at the return; callers on that path presumably
            read weights from the loaded model directly — confirm.
        """
        batch_size = 10
        # Fix: initialize here so the 's1' branch returns defined values
        # instead of raising UnboundLocalError.
        w_arr = None
        b_arr = None
        # If a single model was trained over all AUs jointly, loading that
        # one model is enough.
        if FLAGS.model.startswith('s1'):
            three_layers = feature_layer(batch_size, TOTAL_NUM_AU)
            three_layers.loadWeight(FLAGS.vae_model_to_test,
                                    FLAGS.au_idx,
                                    num_au_for_rm=TOTAL_NUM_AU)
        # Otherwise each AU has its own model, so stack the per-AU weights.
        else:
            three_layers = feature_layer(batch_size, 1)
            all_au = [
                'au1', 'au2', 'au4', 'au6', 'au9', 'au12', 'au25', 'au26'
            ]
            if FLAGS.au_idx < TOTAL_NUM_AU: all_au = [all_au[FLAGS.au_idx]]
            for au in all_au:
                # Checkpoint path layout differs per model family.
                if FLAGS.model.startswith('s3'):
                    load_model_path = FLAGS.vae_model_to_test + '/' + FLAGS.model + '_' + au + '_kshot' + str(
                        FLAGS.update_batch_size) + '_iter100'
                elif FLAGS.model.startswith('s4'):
                    load_model_path = FLAGS.vae_model_to_test + '/' + au + '.sub' + str(
                        sbjt_start_idx)
                else:
                    load_model_path = FLAGS.vae_model_to_test + '/' + FLAGS.model + '_' + au + '_kshot' + str(
                        FLAGS.update_batch_size
                    ) + '_iter200_kshot10_iter10_nobatch_adam_noinit'
                three_layers.loadWeight(load_model_path, au)
                print('=============== Model S loaded from ', load_model_path)
                # Fetch both last-layer arrays with one get_weights() call
                # (the original called it twice).
                w, b = three_layers.model_intensity.layers[-1].get_weights()[:2]
                print(
                    '--------------------------------------------------8--------'
                )
                if w_arr is None:
                    w_arr = w
                    b_arr = b
                else:
                    w_arr = np.hstack((w_arr, w))
                    b_arr = np.vstack((b_arr, b))

        return w_arr, b_arr
Beispiel #3
0
    def make_data_tensor(self, kshot_seed):
        """Sample k-shot features per subject, embed them, and return tensors.

        Each subject's samples are dealt pairwise into two disjoint halves
        (a/b), embedded through the VAE feature layers, and reshaped into
        (total_num_au * meta_batch_size, 2K, weight_dim) input tensors with
        matching (…, total_num_au) label tensors.
        """
        print("===================================make_data_tensor in daga_generator2")
        print(">>>>>>> sampling seed: ", kshot_seed)
        folders = self.metatrain_character_folders
        print(">>>>>>> train folders: ", folders)

        # Accumulate raw features/labels across all training subjects.
        print('Generating filenames')
        inputa_features = []
        inputb_features = []
        labelas = []
        labelbs = []
        # Sets a and b must be disjoint, so both are sampled together per
        # subject and then dealt out in consecutive (even, odd) pairs.
        for sub_folder in folders:
            off_imgs, on_imgs, off_labels, on_labels = get_kshot_feature_w_all_labels(
                sub_folder,
                FLAGS.feature_path,
                kshot_seed,
                nb_samples=FLAGS.update_batch_size * 2,
                check_sample=FLAGS.check_sample)
            # "off" samples first, then "on" samples — even indices go to a,
            # odd indices to b.
            for imgs, labels in ((off_imgs, off_labels), (on_imgs, on_labels)):
                for img_a, img_b, lab_a, lab_b in zip(imgs[0::2], imgs[1::2],
                                                      labels[0::2], labels[1::2]):
                    inputa_features.append([float(v) for v in img_a])
                    inputb_features.append([float(v) for v in img_b])
                    labelas.append(lab_a)
                    labelbs.append(lab_b)

        print(">>>>>>>>>>>>>>>>> embedding mdo--: ", FLAGS.vae_model)

        ################################### dim reduction ####################################
        embedder = feature_layer(10, FLAGS.num_au)
        embedder.loadWeight(FLAGS.vae_model, FLAGS.au_idx, num_au_for_rm=FLAGS.num_au)

        inputa_latent_feat = embedder.model_final_latent_feat.predict(inputa_features)
        inputb_latent_feat = embedder.model_final_latent_feat.predict(inputb_features)
        print(">>> z_arr len:", len(inputa_latent_feat))

        #################################### make tensor ###############################
        task_count = self.total_num_au * FLAGS.meta_batch_size
        feat_shape = [task_count, FLAGS.update_batch_size * 2, self.weight_dim]
        label_shape = [task_count, FLAGS.update_batch_size * 2, self.total_num_au]

        # (aus*subjects, 2K, latent_dim)
        inputa_latent_feat_tensor = tf.reshape(
            tf.convert_to_tensor(inputa_latent_feat), feat_shape)
        inputb_latent_feat_tensor = tf.reshape(
            tf.convert_to_tensor(inputb_latent_feat), feat_shape)

        # (aus*subjects*K*2 = num of task * 2K, au) -> (aus*subjects, 2K, au)
        labelas_tensor = tf.reshape(
            tf.convert_to_tensor(np.array(labelas)), label_shape)
        labelbs_tensor = tf.reshape(
            tf.convert_to_tensor(np.array(labelbs)), label_shape)

        return inputa_latent_feat_tensor, inputb_latent_feat_tensor, labelas_tensor, labelbs_tensor
Beispiel #4
0
def main():
    """Build the data pipeline, restore weights according to FLAGS, and train.

    All configuration comes from the global FLAGS object, which is mutated in
    place (meta_batch_size, train_update_batch_size, train_update_lr).
    NOTE(review): relies on module-level names defined elsewhere in this file
    (DataGenerator, MAML, train, start_time, FLAGS, datetime).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu

    # NOTE(review): `== False` should idiomatically be `not FLAGS.train`.
    if FLAGS.train == False:
        orig_meta_batch_size = FLAGS.meta_batch_size
        # always use meta batch size of 1 when testing.
        FLAGS.meta_batch_size = 1

    data_generator = DataGenerator()

    dim_output = data_generator.num_classes
    dim_input = data_generator.dim_input

    inputa, inputb, labela, labelb = data_generator.make_data_tensor()
    metatrain_input_tensors = {'inputa': inputa, 'inputb': inputb, 'labela': labela, 'labelb': labelb}

    # pred_weights = data_generator.pred_weights
    model = MAML(dim_input, dim_output)
    model.construct_model(input_tensors=metatrain_input_tensors, prefix='metatrain_')
    model.summ_op = tf.summary.merge_all()

    # Only trainable variables are saved/restored; keep at most 20 checkpoints.
    saver = loader = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=20)

    sess = tf.InteractiveSession()


    if not FLAGS.train:
        # change to original meta batch size when loading model.
        FLAGS.meta_batch_size = orig_meta_batch_size

    # -1 means "mirror the test-time value".
    if FLAGS.train_update_batch_size == -1:
        FLAGS.train_update_batch_size = FLAGS.update_batch_size
    if FLAGS.train_update_lr == -1:
        FLAGS.train_update_lr = FLAGS.update_lr

    # Checkpoint directory name encodes the main hyper-parameters.
    trained_model_dir = 'cls_' + str(FLAGS.num_classes) + '.mbs_' + str(FLAGS.meta_batch_size) + '.ubs_' + str(
        FLAGS.train_update_batch_size) + '.numstep' + str(FLAGS.num_updates) + '.updatelr' + str(
        FLAGS.train_update_lr) + '.metalr' + str(FLAGS.meta_lr)

    print(">>>>> trained_model_dir: ", FLAGS.logdir + '/' + trained_model_dir)


    resume_itr = 0

    tf.global_variables_initializer().run()
    tf.train.start_queue_runners()

    print("================================================================================")
    print('initial weights norm: ', np.linalg.norm(sess.run('model/w1:0')))
    print('initial last weights: ', sess.run('model/w1:0')[-1])
    print('initial bias: ', sess.run('model/b1:0'))
    print("================================================================================")




    ################## Train ##################

    # Case 1: resume this run from its own latest checkpoint.
    if FLAGS.resume:
        model_file = None
        if FLAGS.model.startswith('m2'):
            trained_model_dir = 'sbjt' + str(FLAGS.sbjt_start_idx) + '.ubs_' + str(
                FLAGS.train_update_batch_size) + '.numstep' + str(FLAGS.num_updates) + '.updatelr' + str(
                FLAGS.train_update_lr) + '.metalr' + str(FLAGS.meta_lr)
        model_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + trained_model_dir)
        print(">>>>> trained_model_dir: ", FLAGS.logdir + '/' + trained_model_dir)

        w = None
        b = None
        print(">>>> model_file1: ", model_file)

        if model_file:
            # If a specific iteration was requested, retarget the checkpoint
            # path at 'model<test_iter>' instead of the latest one.
            if FLAGS.test_iter > 0:
                files = os.listdir(model_file[:model_file.index('model')])
                if 'model' + str(FLAGS.test_iter) + '.index' in files:
                    model_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.test_iter)
                    print(">>>> model_file2: ", model_file)
            print("1. Restoring model weights from " + model_file)
            saver.restore(sess, model_file)
            b = sess.run('model/b1:0').tolist()
            print("updated weights from ckpt: ", np.array(b))
            # Checkpoints are named 'model<iter>'; parse the iteration count
            # so training continues from where it stopped.
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1 + 5:])

    # Case 2: initialize weights from another model but start at iteration 0.
    elif FLAGS.keep_train_dir:  # when the model needs to be initialized from another model.
        resume_itr = 0
        print('resume_itr: ', resume_itr)
        model_file = tf.train.latest_checkpoint(FLAGS.keep_train_dir)
        print(">>>>> base_model_dir: ", FLAGS.keep_train_dir)

        if FLAGS.test_iter > 0:
            files = os.listdir(model_file[:model_file.index('model')])
            if 'model' + str(FLAGS.test_iter) + '.index' in files:
                model_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.test_iter)
                print(">>>> model_file2: ", model_file)

        print("2. Restoring model weights from " + model_file)
        saver.restore(sess, model_file)
        print("updated weights from ckpt: ", sess.run('model/b1:0'))

    # Case 3 ('s4'): seed the graph's last layer from a Keras VAE model.
    elif FLAGS.model.startswith('s4'):
        from feature_layers import feature_layer
        three_layers = feature_layer(10, 1)
        print('FLAGS.base_vae_model: ', FLAGS.base_vae_model)
        three_layers.model_intensity.load_weights(FLAGS.base_vae_model + '.h5')
        w = three_layers.model_intensity.layers[-1].get_weights()[0]
        b = three_layers.model_intensity.layers[-1].get_weights()[1]
        print('s2 b: ', b)
        print('s2 w: ', w)
        print('-----------------------------------------------------------------')
        # Copy the Keras weights into the TF variables 'model/b1' / 'model/w1'.
        # NOTE(review): shapes [1, 2] and [300, 1, 2] are hard-coded — confirm
        # they match the graph built by MAML.construct_model.
        with tf.variable_scope("model", reuse=True) as scope:
            scope.reuse_variables()
            b1 = tf.get_variable("b1", [1, 2]).assign(np.array(b))
            w1 = tf.get_variable("w1", [300, 1, 2]).assign(np.array(w))
            sess.run(b1)
            sess.run(w1)
        print("after: ", sess.run('model/b1:0'))
        print("after: ", sess.run('model/w1:0'))

    # Per-subject models get a directory name keyed by the subject index.
    if not FLAGS.all_sub_model:
        trained_model_dir = 'sbjt' + str(FLAGS.sbjt_start_idx) + '.ubs_' + str(
            FLAGS.train_update_batch_size) + '.numstep' + str(FLAGS.num_updates) + '.updatelr' + str(
            FLAGS.train_update_lr) + '.metalr' + str(FLAGS.meta_lr)

    print("================================================================================")

    train(model, saver, sess, trained_model_dir, metatrain_input_tensors, resume_itr)

    # NOTE(review): start_time is presumably a module-level timestamp set at
    # script start — verify it exists in the full file.
    end_time = datetime.now()
    elapse = end_time - start_time
    print("================================================================================")
    print(">>>>>> elapse time: " + str(elapse))
    print("================================================================================")
if nb_iter > 0: model_train.save_weights(model_name)
import cv2

# Fix: `is not ''` compared identity, not equality (undefined behavior for
# interned strings; SyntaxWarning on modern CPython) — use `!=`.
if args.deep_feature != '':
    # directory to save the features
    if not os.path.exists(args.deep_feature):
        os.makedirs(args.deep_feature)
    # from resnet feature 2048 -> 300
    if 'resnet' in args.deep_feature:
        print('==================================================')
        print('FROM RESNET')
        print('==================================================')
        print(">>>>>>>>>>>>>>>>> embedding model: ", args.restored_model)
        from feature_layers import feature_layer

        three_layers = feature_layer(10, TOTAL_AU)
        three_layers.loadWeight(args.restored_model)

        path = '/home/ml1323/project/robert_data/DISFA_new/detected_disfa_features/'
        subjects = os.listdir(path)
        subjects.sort()
        for subject in subjects:
            detected_frame_idx = []
            all_feat_vec = []
            with open(os.path.join(path, subject), 'r') as f:
                lines = f.readlines()
                for line in lines:
                    # Line format: ..., ..., 'frame<idx>', feat0, feat1, ...
                    line = line.split(',')
                    frame_idx = int(line[2].split('frame')[1])
                    feat_vec = [float(elt) for elt in line[3:]]
                    # NOTE(review): snippet appears truncated here — frame_idx
                    # and feat_vec are computed but never appended to the
                    # accumulator lists in the visible code.
Beispiel #6
0
    def make_data_tensor(self, train=True):
        """Build a/b input and one-hot label tensors for on/off classification.

        Args:
            train: if True, sample from the training split; otherwise the
                validation split (forwarded as `validate=not train`).

        Returns:
            (inputa, inputb, labela, labelb) tensors shaped
            (meta_batch_size, 2K, weight_dim) for inputs and
            (meta_batch_size, 2K, num_classes) for labels.
        """
        folders = self.metatrain_character_folders
        print(">>>>>>> train folders: ", folders)

        # make list of files
        print('Generating filenames')
        inputa_features = []
        inputb_features = []
        labelas = []
        labelbs = []
        # To have totally different inputa and inputb, they should be sampled at the same time and then split.
        for sub_folder in folders:
            # Consolidated the duplicated train/validation branches: only the
            # `validate` flag differed between them.
            off_imgs, on_imgs = get_kshot_feature(
                sub_folder,
                FLAGS.feature_path,
                FLAGS.kshot_seed,
                nb_samples=FLAGS.update_batch_size * 2,
                validate=not train)
            # Split data into a/b: even-indexed samples go to a, odd to b.
            half_off_img = int(len(off_imgs) / 2)
            half_on_img = int(len(on_imgs) / 2)
            inputa_this_subj = []
            inputb_this_subj = []
            for i in range(half_off_img):
                inputa_this_subj.append([float(k) for k in off_imgs[2 * i]])
                inputb_this_subj.append(
                    [float(k) for k in off_imgs[2 * i + 1]])
            for i in range(half_on_img):
                inputa_this_subj.append([float(k) for k in on_imgs[2 * i]])
                inputb_this_subj.append([float(k) for k in on_imgs[2 * i + 1]])
            # Binary labels: 0 = off, 1 = on, matching the input order above.
            labela_this_subj = [0] * half_off_img
            labela_this_subj.extend([1] * half_on_img)
            labelb_this_subj = [0] * half_off_img
            labelb_this_subj.extend([1] * half_on_img)

            # Fixed seeds shuffle features and labels in lockstep and keep
            # the split reproducible across runs.
            np.random.seed(1)
            np.random.shuffle(inputa_this_subj)
            np.random.seed(1)
            np.random.shuffle(labela_this_subj)

            np.random.seed(2)
            np.random.shuffle(inputb_this_subj)
            np.random.seed(2)
            np.random.shuffle(labelb_this_subj)

            inputa_features.extend(inputa_this_subj)
            inputb_features.extend(inputb_this_subj)
            labelas.extend(labela_this_subj)
            labelbs.extend(labelb_this_subj)

        print("--------------------------------------------")
        print(">>>>>>>>>>>>>>>>> vae_model: ", FLAGS.vae_model)
        print(">>>>>>>>>>>>>>>>>> random seed for kshot: ", FLAGS.kshot_seed)
        print(">>>>>>>>>>>>>>>>>> random seed for weight: ", FLAGS.weight_seed)

        ################## dim reduction through the VAE feature layers #################

        three_layers = feature_layer(10, FLAGS.num_au)
        three_layers.loadWeight(FLAGS.vae_model,
                                FLAGS.au_idx,
                                num_au_for_rm=FLAGS.num_au)
        inputa_latent_feat = three_layers.model_final_latent_feat.predict(
            inputa_features)
        inputb_latent_feat = three_layers.model_final_latent_feat.predict(
            inputb_features)
        # inputa_latent_feat = inputa_features
        # inputb_latent_feat = inputb_features
        print(">>> z_arr len:", len(inputa_latent_feat))
        #################################################################################

        inputa_latent_feat_tensor = tf.convert_to_tensor(inputa_latent_feat)
        print(inputa_latent_feat_tensor.shape)
        inputa_latent_feat_tensor = tf.reshape(inputa_latent_feat_tensor, [
            FLAGS.meta_batch_size, FLAGS.update_batch_size * 2, self.weight_dim
        ])
        inputb_latent_feat_tensor = tf.convert_to_tensor(inputb_latent_feat)
        inputb_latent_feat_tensor = tf.reshape(inputb_latent_feat_tensor, [
            FLAGS.meta_batch_size, FLAGS.update_batch_size * 2, self.weight_dim
        ])

        labelas_tensor = tf.convert_to_tensor(labelas)
        labelbs_tensor = tf.convert_to_tensor(labelbs)
        labelas_tensor = tf.one_hot(labelas_tensor,
                                    self.num_classes)  ## (num_of_task, 2NK, N)
        labelbs_tensor = tf.one_hot(labelbs_tensor,
                                    self.num_classes)  ## (num_of_task, 2NK, N)
        # Fix: the reshape previously hard-coded 2 classes; use
        # self.num_classes to stay consistent with the tf.one_hot depth above
        # (identical when num_classes == 2, correct otherwise).
        labelas_tensor = tf.reshape(
            labelas_tensor,
            [FLAGS.meta_batch_size, FLAGS.update_batch_size * 2, self.num_classes])
        labelbs_tensor = tf.reshape(
            labelbs_tensor,
            [FLAGS.meta_batch_size, FLAGS.update_batch_size * 2, self.num_classes])

        return inputa_latent_feat_tensor, inputb_latent_feat_tensor, labelas_tensor, labelbs_tensor