Example #1
def main(_):

    # for rm in [0.5, 0.6, 0.7, 0.8]:
    #     for ds_ind in range(5):
    #         config.data_rm = rm
    #         config.ds_ind = ds_ind
    # Create the output directories used for saving
    prepare_dirs(config)
    prepare_config_date(config, config.ds_ind)
    # Random seed settings
    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    # Model training
    trainer = Trainer(config, rng)
    save_config(config.model_dir, config)
    config.load_path = config.model_dir
    if config.is_train:
        trainer.train(save=True)
        result_dict = trainer.test()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to "
                            "load a pretrained model")
        result_dict = trainer.test()
    save_results(config.result_dir, result_dict)
    accept_rate = evaluate_result(result_dict, method='KS-test', alpha=0.1)
    kl_div = evaluate_result(result_dict, method='KL')
    wasser_dis = evaluate_result(result_dict, method='wasser')
    sig_test = evaluate_result(result_dict, method='sig_test')
    print("The accept rate of KS test is ", accept_rate)
    print("The final KL div is ", kl_div)
    print("The wasser distance is ", wasser_dis)
    print("The AR of Sign Test is ", sig_test)
Example #2
def plot_gt_vs_ans():
    global test_videos
    ab_score = {}
    import pickle
    videos_pkl = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_test.pkl"
    with open(videos_pkl, 'rb') as f:
        test_videos = pickle.load(f)
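    # Compute a per-snippet abnormality score for every test video from its saved
    # flow-stream scores.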
    for vid in test_videos:
        with np.load("/home/lnn/data/UCF_Crimes/flow2100/%s_flow.npz" % vid,
                     'r') as f:
            # with np.load("/home/lnn/data/UCF_Crimes/rgb4500/%s_rgb.npz" % vid, 'r') as f:
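            # Average the raw scores over axis 1, softmax each snippet (softmax is
            # assumed to be defined elsewhere in this module), and keep index 1 as
            # the abnormal-class probability.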
            boundary = f["scores"].mean(axis=1)
            boundary = [softmax(b) for b in boundary]
            abnormality = np.zeros(len(boundary))
            for i in range(len(boundary)):
                abnormality[i] = boundary[i][1]

            # abnormality = cv2.blur(abnormality, (1, 7)).flatten()

            ab_score[vid] = abnormality
        '''
        import h5py
        mat_path = "/home/zjx/workspace/abnormality-detection-Lu/data/testing_result/res_err_per_frame_regionalRes_%s.mat" % vid
        with h5py.File(mat_path, 'r') as f:
            abnormality = f["res_err"][:].flatten()
        '''
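        # Pick high-confidence abnormal (pos) and normal (neg) snippet indices by
        # thresholding, then clamp each set to between min_cnt and max_cnt elements
        # using the ranked abnormality scores.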
        pos_thr = 0.98
        neg_thr = 0.03
        min_cnt = 4
        max_cnt = 120
        pos = list(np.where(abnormality > pos_thr)[0])
        neg = list(np.where(abnormality < neg_thr)[0])
        if len(pos) < min_cnt:
            pos = np.argsort(1 - abnormality)[0:min_cnt].tolist()
        elif len(pos) > max_cnt:
            pos = np.argsort(1 - abnormality)[0:max_cnt].tolist()
        if len(neg) < min_cnt:
            neg = np.argsort(abnormality)[0:min_cnt].tolist()
        elif len(neg) > max_cnt:
            neg = np.argsort(abnormality)[0:max_cnt].tolist()

        mask = list(set(pos + neg))

        continue  # skip the per-video plotting below; remove this line to visualize
        gt = test_videos[vid]
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        x1 = range(len(gt))
        ax1.set_ylim([0, 1.1])
        ax1.set_xlim([0, len(x1)])
        ax1.plot(x1, gt, label="Ground Truth", color='g')

        ax2 = fig.add_subplot(212)  # ax1.twinx()
        x2 = range(len(abnormality))
        ax2.set_ylim([0, 1.1])  # 0.05 + 0.21])
        ax2.set_xlim([0, len(x2)])
        ax2.plot(x2, abnormality, label="C3D Prediction", color='r')
        ax2.scatter(mask, abnormality[mask])
        # ax2.set_ylabel("Abnormality Score")

        plt.title(vid)
        plt.legend(loc="best")
        plt.show()
    from utils import evaluate_result
    evaluate_result(ab_score)
Example #3
        c, tf = np.unique(cluster, return_counts=True)
        cluster2tf = dict(zip(c, tf))
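        # Normalize the per-cluster counts to term frequencies, then weight each
        # snippet's cluster by its inverse document frequency from normal videos
        # (cluster2idf_normal, defined earlier in the surrounding code).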
        for c in cluster2tf:
            cluster2tf[c] /= float(len(cluster))
        tf_idf = []
        for c in cluster:
            tf_idf.append(cluster2tf[c] * cluster2idf_normal[c])
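        # For anomalous (non-"Normal") videos, threshold at the 4th-largest unique
        # tf-idf value when available (falling back to the largest otherwise) and
        # flag snippets at or above it as abnormal.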
        if "Normal" not in vid:
            thr_arr = np.sort(np.unique(tf_idf))
            if len(thr_arr) > 3:
                thr = thr_arr[-4]
            elif len(thr_arr) > 2:
                thr = thr_arr[-3]
            elif len(thr_arr) > 1:
                thr = thr_arr[-2]
            else:
                thr = thr_arr[-1]
            vid2abscore[vid] = np.where(tf_idf >= thr, 1, 0)
        continue  # skip the per-video plotting below; remove this line to visualize
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        # print(vid, tf_idf)
        ax1.plot(tf_idf, color='r')
        ax2.plot(ab_score, color='g')
        plt.title(vid)
        plt.show()

    from utils import evaluate_result
    evaluate_result(vid2abscore)
Example #4
def main():
    # load config file
    config = load_config(config_path)

    # build dict for token (vocab_dict) and char (vocab_c_dict)
    vocab_dict, vocab_c_dict = build_dict(vocab_path, vocab_char_path)

    # load pre-trained embedding
    # W_init: token index * token embedding
    # embed_dim: embedding dimension
    W_init, embed_dim = load_word2vec_embedding(word_embedding_path, vocab_dict)
    
    K = 3

    # generate train/valid examples
    train_data, sen_cut_train = generate_examples(train_path, vocab_dict, vocab_c_dict, config, "train")
    dev_data, sen_cut_dev = generate_examples(valid_path, vocab_dict, vocab_c_dict, config, "dev")

    #------------------------------------------------------------------------
    # training process begins
    hidden_size = config['nhidden']
    batch_size = config['batch_size']

    coref_model = model.CorefQA(hidden_size, batch_size, K, W_init, config).to(device)

    if len(sys.argv) > 4 and str(sys.argv[4]) == "load":
        try:
            coref_model.load_state_dict(torch.load(torch_model_p))
            print("saved model loaded")
        except Exception:
            print("no saved model")

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(coref_model.parameters(), lr=config['learning_rate']) # TODO: use hyper-params in paper

    iter_index = 0
    batch_acc_list = []
    batch_loss_list = []
    dev_acc_list = []

    max_iter = int(config['num_epochs'] * len(train_data) / batch_size)
    print("max iteration number: " + str(max_iter))

    while True:
        # building batch data
        # batch_xxx_data is a list of batch data (len 15)
        # [dw, m_dw, qw, m_qw, dc, m_dc, qc, m_qc, cd, m_cd, a, dei, deo, dri, dro]
        batch_train_data, sen_cut_batch = generate_batch_data(train_data, config, "train", -1, sen_cut_train)  # -1 means random sampling
        # dw, m_dw, qw, m_qw, dc, m_dc, qc, m_qc, cd, m_cd, a, dei, deo, dri, dro = batch_train_data

        print(len(sen_cut_batch))

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward pass
        dw, dc, qw, qc, cd, cd_m = extract_data(batch_train_data)
        cand_probs = coref_model(dw, dc, qw, qc, cd, cd_m, sen_cut_batch) # B x Cmax

        answer = torch.tensor(batch_train_data[10]).type(torch.LongTensor) # B x 1
        loss = criterion(cand_probs, answer)

        # evaluation process
        acc_batch = cal_acc(cand_probs, answer, batch_size)
        batch_acc_list.append(acc_batch)
        batch_loss_list.append(loss.detach())  # detach so stored losses don't retain the computation graph
        dev_acc_list = evaluate_result(iter_index, config, dev_data, batch_acc_list, batch_loss_list, dev_acc_list, coref_model, sen_cut_dev)

        # save model
        if iter_index % config['model_save_frequency'] == 0 and len(sys.argv) > 4:
            torch.save(coref_model.state_dict(), torch_model_p)

        # back-prop
        loss.backward()
        optimizer.step()

        # check stopping criteria
        iter_index += 1
        if iter_index > max_iter: break