Example #1
def perform_icarl(examplar_size, s_class):
    if s_class == 1:
        # bootstrap: the first iCaRL checkpoint is copied from the pre-trained two-class model
        saver_load.restore(sess, '../model/fsize_10_flam_0.0001_ssize_2000_class_2.ckpt')
    else:
        print('load model: icarl_ssize_{}_class_{}.ckpt'.format(examplar_size, s_class))
        saver_load.restore(sess,
            '../model/icarl_ssize_{}_class_{}.ckpt'.format(examplar_size, s_class))

        # evaluate the restored model on the classes seen so far
        data_1 = exclude_data(data_all, range(s_class, 6))
        # train_model(data_1, train_op, train_steps)
        # construct the exemplar set from the current feature representation
        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(data_1)
        evaluate_model(data_1[5], np.array(predict_label))
        evaluate_model(data_1[0], np.array(predict_label_train))
        print('Here is the result of NME:')
        test_predict = nme_pred(final_train_fea, final_test_fea, data_1[0])
        evaluate_model(data_1[5], np.array(test_predict))
        examplar_index = construct_examplar(final_train_fea,
            data_1[0], examplar_size, all_train_label)
        examplar = get_support_data(data_1, examplar_index)

        # train on the exemplars merged with the new class, then re-evaluate (classifier and NME)
        data_2 = exclude_data(data_all, range(s_class) + range(s_class + 1, 6))
        used_data = merge_data(examplar, data_2)
        train_model(used_data, train_op, train_steps)
        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(used_data)
        evaluate_model(used_data[5], np.array(predict_label))
        evaluate_model(used_data[0], np.array(predict_label_train))
        print('Here is the result of NME:')
        test_predict = nme_pred(final_train_fea, final_test_fea, used_data[0])
        evaluate_model(used_data[5], np.array(test_predict))
    # save the checkpoint for the next increment
    saver_load.save(sess,
        '../model/icarl_ssize_{}_class_{}.ckpt'.format(examplar_size, s_class + 1))
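Each call saves the checkpoint the next call expects, so the whole iCaRL run is a loop over class increments. A minimal driver sketch, assuming the globals above (sess, saver_load, data_all, ...) are already set up; the exemplar budget of 2000 is an assumed value, not prescribed by the example:

# Hypothetical driver for perform_icarl; 2000 is an assumed exemplar budget.
EXAMPLAR_SIZE = 2000

for s_class in range(1, 6):
    print('--- iCaRL increment: s_class = {} ---'.format(s_class))
    perform_icarl(EXAMPLAR_SIZE, s_class)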
Example #2
def check_support_data_batch_performance(train_op, exclude_list,
                                         final_train_fea, data_merged,
                                         support_size, train_steps, aug_rate):
    if support_size > 0:
        # construct the support data for the first and second batch
        support_data_index = select_support_data_svm(final_train_fea,
                                                     data_merged[0],
                                                     support_size)
        support_data = get_support_data(data_merged, support_data_index)

        # load the second batch data
        data_2 = exclude_data(data_all, exclude_list)
        data_2 = append_old_feature_new_data(data_2)
        data_merged = merge_data(support_data, data_2)
        used_data = merge_data(augmentation(support_data, aug_rate), data_2)
    if support_size == 0:
        # no support data: train on the new batch alone, but still evaluate on everything seen so far
        used_data = exclude_data(data_all, exclude_list)
        used_data = append_old_feature_new_data(used_data)
        data_merged = merge_data(used_data, data_merged)

    train_model(used_data, train_op, train_steps)
    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))

    return final_train_fea, data_merged
Example #3
def restart_from_ckpt(f_size, f_lam, s_size, s_class):
    if s_class == 1:
        # first increment: train on the first two classes from scratch
        data_1 = exclude_data(data_all, range(2, 6))

        train_model(data_1, train_op, train_steps)

        # check performance
        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(data_1)
        evaluate_model(data_1[5], np.array(predict_label))
        evaluate_model(data_1[0], np.array(predict_label_train))

    if s_class >= 2:
        print('load model: fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
            f_lam, s_size, s_class))
        saver_load.restore(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
            f_lam, s_size, s_class))

        # evaluate the restored model on the classes seen so far
        data_1 = exclude_data(data_all, range(s_class, 6))
        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(data_1)
        evaluate_model(data_1[5], np.array(predict_label))
        evaluate_model(data_1[0], np.array(predict_label_train))

        # continue with the next class under the Fisher (EWC) regularizer plus support data
        new_step = set_fisher_regularizer(f_lam, data_1, f_size)
        train_loss, data_merged = check_support_data_batch_performance(new_step,
            range(s_class) + range(s_class + 1, 6),
            final_train_fea, data_1, s_size, train_steps, 2)

    saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
        f_lam, s_size, s_class + 1))
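Because every call saves the checkpoint for s_class + 1, resuming an interrupted EWC run is just a loop from the last completed increment. A sketch of such a driver; the hyperparameter values are borrowed from the checkpoint name in Example #7 and are only assumptions here:

# Hypothetical resume loop; f_size, f_lam, s_size are assumed values.
f_size, f_lam, s_size = 100, 1000, 2000

# s_class = 1 trains the first two classes from scratch; higher values resume
# from the matching fsize_*_flam_*_ssize_*_class_* checkpoint.
for s_class in range(1, 6):
    restart_from_ckpt(f_size, f_lam, s_size, s_class)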
Example #4
def incremental_all_data():
    # get partial data
    data_1 = exclude_data(data_all, range(2, 6))
    data_1 = append_old_feature_new_data(data_1)

    train_model(data_1, train_op, train_steps)

    # check performance
    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_1)
    evaluate_model(data_1[5], np.array(predict_label))
    evaluate_model(data_1[0], np.array(predict_label_train))

    # add class 2's data and retrain on everything seen so far
    data_2 = exclude_data(data_all, range(2) + range(3, 6))
    data_2 = append_old_feature_new_data(data_2)
    data_merged = merge_data(data_1, data_2)
    train_model(data_merged, train_op, train_steps)

    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))

    # add class 3's data
    data_2 = exclude_data(data_all, range(3) + range(4, 6))
    data_2 = append_old_feature_new_data(data_2)
    data_merged = merge_data(data_merged, data_2)
    train_model(data_merged, train_op, train_steps)

    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))

    # add class 4's data
    data_2 = exclude_data(data_all, range(4) + range(5, 6))
    data_2 = append_old_feature_new_data(data_2)
    data_merged = merge_data(data_merged, data_2)
    train_model(data_merged, train_op, train_steps)

    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))

    # add class 5's data
    data_2 = exclude_data(data_all, range(5))
    data_2 = append_old_feature_new_data(data_2)
    data_merged = merge_data(data_merged, data_2)
    train_model(data_merged, train_op, train_steps)

    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))
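Each block above repeats the same merge/train/evaluate pattern for one more class. A condensed sketch of the same flow as a loop (the loop form is an assumption; the original keeps the steps unrolled, and list(range(...)) is used here so the sketch also runs on Python 3):

# Condensed sketch of incremental_all_data's repeated blocks (not the original code).
def incremental_all_data_loop():
    # first batch: classes 0 and 1 (first-batch evaluation from the original omitted for brevity)
    data_merged = append_old_feature_new_data(exclude_data(data_all, list(range(2, 6))))
    train_model(data_merged, train_op, train_steps)

    for new_class in range(2, 6):
        # keep only the new class, append it to everything seen so far, retrain, evaluate
        exclude_list = [c for c in range(6) if c != new_class]
        data_2 = append_old_feature_new_data(exclude_data(data_all, exclude_list))
        data_merged = merge_data(data_merged, data_2)
        train_model(data_merged, train_op, train_steps)

        predict_label_train, predict_label, _, _ = whole_set_check(data_merged)
        evaluate_model(data_merged[5], np.array(predict_label))
        evaluate_model(data_merged[0], np.array(predict_label_train))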
Example #5
def check_feature_representation(f_size, f_lam, s_size):
    # load the initial data and find the original support data using the first model
    print('load model: fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(
        f_size, f_lam, s_size, 2))
    saver_load.restore(
        sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(
            f_size, f_lam, s_size, 2))
    data_1 = exclude_data(data_all, range(2, 6))
    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_1)
    evaluate_model(data_1[5], np.array(predict_label))
    evaluate_model(data_1[0], np.array(predict_label_train))

    # collect the first two classes' features under every later checkpoint
    # to see how the representation drifts (dumped for t-SNE below)
    feature_list = list()
    feature_list.append(data_1[0])
    feature_list.append(final_train_fea)
    for s_class in range(3, 7):
        print('load model: fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(
            f_size, f_lam, s_size, s_class))
        saver_load.restore(
            sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(
                f_size, f_lam, s_size, s_class))

        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
            data_1)
        evaluate_model(data_1[5], np.array(predict_label))
        evaluate_model(data_1[0], np.array(predict_label_train))
        feature_list.append(final_train_fea)
    with open('for_tsne_{}.pickle'.format(f_lam), 'w') as f:
        cPickle.dump(feature_list, f)
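The dumped pickle holds data_1[0] followed by one feature matrix per checkpoint, which can then be embedded with t-SNE. A minimal loading sketch, assuming scikit-learn is available and f_lam matches the value used when writing the file; on Python 3, swap cPickle for pickle and use binary mode for both reading and writing:

# Hypothetical consumer of the for_tsne_<f_lam>.pickle dump; scikit-learn is an assumed dependency.
from sklearn.manifold import TSNE

with open('for_tsne_{}.pickle'.format(f_lam), 'rb') as f:
    feature_list = cPickle.load(f)

reference = feature_list[0]  # data_1[0], stored first by check_feature_representation
for step, features in enumerate(feature_list[1:]):
    # one 2-D embedding per saved checkpoint, to visualise representation drift
    embedding = TSNE(n_components=2).fit_transform(features)
    print('checkpoint {}: embedding shape {}'.format(step, embedding.shape))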
Example #6
def check_fix_rep_batch_performance(train_op, exclude_list, data_merged):
    # train on the new batch only with the given train_op, then evaluate on all data merged so far
    used_data = exclude_data(data_all, exclude_list)
    data_merged = merge_data(used_data, data_merged)

    train_model(used_data, train_op, train_steps)
    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
        data_merged)
    evaluate_model(data_merged[5], np.array(predict_label))
    evaluate_model(data_merged[0], np.array(predict_label_train))
Example #7
def restart_from_ckpt(s_class):
    if s_class == 1:
        # bootstrap: start from the pre-trained two-class model and check its performance
        data_1 = exclude_data(data_all, range(2, 6))
        saver_load.restore(
            sess, '../model/fsize_100_flam_1000_ssize_2000_class_2.ckpt')
        # check performance
        predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(
            data_1)
        evaluate_model(data_1[5], np.array(predict_label))
        evaluate_model(data_1[0], np.array(predict_label_train))

    if s_class >= 2:
        print('load model: fix_rep_class_{}.ckpt'.format(s_class))
        saver_load.restore(sess,
                           '../model/fix_rep_class_{}.ckpt'.format(s_class))

        data_1 = exclude_data(data_all, range(s_class, 6))

        check_fix_rep_batch_performance(train_op,
                                        range(s_class) + range(s_class + 1, 6),
                                        data_1)

    saver_load.save(sess, '../model/fix_rep_class_{}.ckpt'.format(s_class + 1))
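As with the other variants, every call saves fix_rep_class_{s_class + 1}.ckpt, so a full run, or a restart after a crash, is a loop over the remaining class indices. A small sketch, with the starting index as an assumed argument:

# Hypothetical driver for the fixed-representation variant.
def run_fix_rep(start_class=1):
    # start_class = 1 begins from the pre-trained two-class model;
    # start_class >= 2 resumes from fix_rep_class_<start_class>.ckpt.
    for s_class in range(start_class, 6):
        restart_from_ckpt(s_class)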
Example #8
def support_with_ewc(f_size, f_lam, s_size):
    # train on the first two classes
    data_1 = exclude_data(data_all, range(2, 6))

    train_model(data_1, train_op, train_steps)

    # check performance
    predict_label_train, predict_label, final_train_fea, final_test_fea = whole_set_check(data_1)
    evaluate_model(data_1[5], np.array(predict_label))
    evaluate_model(data_1[0], np.array(predict_label_train))

    # add class 2 under the Fisher (EWC) regularizer plus support data
    new_step = set_fisher_regularizer(f_lam, data_1, f_size)
    train_loss, data_merged = check_support_data_batch_performance(new_step, range(2) + range(3, 6),
        final_train_fea, data_1, s_size, train_steps, 2)
    # (despite its name, train_loss holds the updated training features returned above)

    saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
        f_lam, s_size, 3))

    # add class 3
    new_step = set_fisher_regularizer(f_lam, data_merged, f_size)
    train_loss, data_merged = check_support_data_batch_performance(new_step, range(3) + range(4, 6),
        train_loss, data_merged, s_size, train_steps, 2)

    saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
        f_lam, s_size, 4))

    # add class 4
    new_step = set_fisher_regularizer(f_lam, data_merged, f_size)
    train_loss, data_merged = check_support_data_batch_performance(new_step, range(4) + range(5, 6),
        train_loss, data_merged, s_size, train_steps, 2)

    saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
        f_lam, s_size, 5))

    # add class 5
    new_step = set_fisher_regularizer(f_lam, data_merged, f_size)
    train_loss, data_merged = check_support_data_batch_performance(new_step, range(5),
        train_loss, data_merged, s_size, train_steps, 2)

    saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(f_size,
        f_lam, s_size, 6))
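The four increments above differ only in which class is kept and in the checkpoint index, so the routine could also be written as a loop. A condensed sketch (the loop form is an assumption; the original keeps the steps unrolled, and the initial evaluate_model calls are omitted for brevity):

# Condensed sketch of support_with_ewc's per-increment pattern (not the original unrolled code).
def support_with_ewc_loop(f_size, f_lam, s_size):
    data_merged = exclude_data(data_all, list(range(2, 6)))
    train_model(data_merged, train_op, train_steps)
    _, _, train_fea, _ = whole_set_check(data_merged)

    for new_class in range(2, 6):
        # EWC (Fisher) regularizer on everything seen so far, then add one new class
        new_step = set_fisher_regularizer(f_lam, data_merged, f_size)
        exclude_list = [c for c in range(6) if c != new_class]
        train_fea, data_merged = check_support_data_batch_performance(
            new_step, exclude_list, train_fea, data_merged, s_size, train_steps, 2)

        saver_load.save(sess, '../model/fsize_{}_flam_{}_ssize_{}_class_{}.ckpt'.format(
            f_size, f_lam, s_size, new_class + 1))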