Example #1
import torch
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader

# Project-specific names used below (UCFCrimeSlow, NoiseFilter, soft_uniform_sampling)
# are assumed to come from the repository's own modules.

videos_pkl_train = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_train.pkl"
feature_folder = "/home/lnn/workspace/UCF_Crimes/c3d_features_all/_iter_4500/"
prediction_folder = "/home/lnn/workspace/UCF_Crimes/c3d_features_1_high_conf_sampling_0.6/_iter_1000/"
gcn_model_path = "/home/lnn/workspace/pygcn/pygcn/models_c3d_2_0.6/c3d_4.pth"
modality = "c3d"
gpu_id = 0
output_folder = "/home/lnn/workspace/pygcn/output_c3d_high_conf_2/"

if __name__ == '__main__':
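    # graph-generator parameters, passed straight through to soft_uniform_sampling below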
    param = (4, 0.7, 0.1, 2, 2000, -0.1)
    ucf_crime_train = UCFCrimeSlow(videos_pkl_train,
                                   prediction_folder,
                                   feature_folder,
                                   modality,
                                   graph_generator=soft_uniform_sampling,
                                   graph_generator_param=param,
                                   random_crop=False)
    train_loader = DataLoader(dataset=ucf_crime_train, num_workers=16)
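    # GCN noise filter: 4096-d C3D snippet features in, one score per snippet out (nclass=1)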
    model = NoiseFilter(nfeat=4096, nclass=1)

    if gpu_id != -1:
        model = model.cuda(gpu_id)

    model.load_state_dict(torch.load(gcn_model_path))
    model.eval()
    vid2ans = {}
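    # each batch yields the snippet graph (features, adjacency, labeled indices),
    # the original noisy predictions, and the video id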
    for step, data in enumerate(train_loader):
        (feat, adj, labeled_index_in_the_graph,
         labeled_index), pred, vid = data
Example #2
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader

# Project-specific names used below (UCFCrimeSlow, NoiseFilter, soft_uniform_sampling,
# SigmoidCrossEntropyLoss, SigmoidMAELoss, get_sample_index, sigmoid) are assumed to come
# from the repository's own modules.


def train_gcn(param):
    torch.cuda.empty_cache()
    videos_pkl_train = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_train.pkl"
    videos_pkl_test = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_test.pkl"
    feature_folder = "/home/lnn/workspace/UCF_Crimes/kinetics_flow5000_feat/"
    prediction_folder = "/home/lnn/workspace/UCF_Crimes/kinetics_flow5000/"
    test_pred_gt_folder = "/home/lnn/data/UCF_Crimes/test_pred_groundtruth/"
    modality = "flow"
    gpu_id = 1
    iter_size = 32
    ucf_crime_train = UCFCrimeSlow(videos_pkl_train,
                                   prediction_folder,
                                   feature_folder,
                                   modality,
                                   graph_generator=soft_uniform_sampling,
                                   graph_generator_param=param)
    train_loader = DataLoader(dataset=ucf_crime_train,
                              batch_size=1,
                              shuffle=True,
                              num_workers=16)
    model = NoiseFilter(nfeat=1024, nclass=1)
    criterion_supervised = SigmoidCrossEntropyLoss()
    criterion_unsupervised = SigmoidMAELoss()
    if gpu_id != -1:
        model = model.cuda(gpu_id)
        criterion_supervised = criterion_supervised.cuda(gpu_id)
        criterion_unsupervised = criterion_unsupervised.cuda(gpu_id)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.0001,
                          momentum=0.9,
                          weight_decay=0.0005)
    opt_scheduler = optim.lr_scheduler.StepLR(optimizer, 16, 0.1)
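    # StepLR: multiply the learning rate by 0.1 every 16 scheduler steps (stepped once per epoch below)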
    iter_count = 0
    avg_loss_train = 0
    alpha = 0.5
    vid2mean_pred = {}
    #model.load_state_dict(torch.load("flow_9_0.6.pth"))
    for epoch in range(20):
        model.train()
        opt_scheduler.step()
        for step, data in enumerate(train_loader):
            (feat, adj, labeled_index_in_the_graph,
             labeled_index), pred, vid = data
            feat, adj, pred = Variable(feat), Variable(adj), Variable(pred)

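            # seed the per-video running-mean prediction with the initial noisy scores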
            if vid[0] not in vid2mean_pred:
                vid2mean_pred[
                    vid[0]] = pred.data.cpu().numpy().flatten().copy()
            mean_pred = Variable(torch.from_numpy(vid2mean_pred[vid[0]]),
                                 requires_grad=False)

            if gpu_id != -1:
                feat = feat.cuda(gpu_id)
                adj = adj.cuda(gpu_id)
                pred = pred.cuda(gpu_id)
                mean_pred = mean_pred.cuda(gpu_id)

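            # gradient accumulation: gradients are cleared once every iter_size videos and applied below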
            if iter_count % iter_size == 0:
                optimizer.zero_grad()

            output = model(feat, adj)
            labeled_index_in_the_graph = np.array(
                labeled_index_in_the_graph).flatten()
            labeled_index = np.array(labeled_index).flatten()
            sample_index = get_sample_index(labeled_index, pred)

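            # normal videos: supervise every snippet against its classifier prediction;
            # anomalous videos: supervise only the labeled (high-confidence) snippets and
            # add a consistency term against the running-mean prediction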
            if "Normal" in vid[0]:
                loss_train = criterion_supervised(
                    output.view(1, -1),
                    pred.view(1, -1)[:, range(output.shape[1])])
            else:
                '''
                loss_train = criterion_supervised(output.view(1, -1)[:, labeled_index_in_the_graph],
                                                  pred.view(1, -1)[:, labeled_index])
                '''
                loss_train = criterion_supervised(output.view(1, -1)[:, labeled_index_in_the_graph],
                                                  pred.view(1, -1)[:, labeled_index]) + \
                             criterion_unsupervised(output.view(1, -1),
                                                    mean_pred.view(1, -1)[:, sample_index])

            avg_loss_train += loss_train.item()  # detach the scalar so the running sum does not hold onto graphs
            iter_count += 1
            loss_train.backward()

            #torch.nn.utils.clip_grad_norm(model.parameters(), 40)

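            # write the new GCN scores back at the sampled positions, then update the
            # per-video mean prediction as an exponential moving average with weight alpha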
            mean_pred_current = mean_pred.data.cpu().numpy().copy().flatten()
            mean_pred_current[sample_index] = sigmoid(
                output).data.cpu().numpy().copy().flatten()
            vid2mean_pred[vid[0]] = alpha * vid2mean_pred[vid[0]] + (
                1 - alpha) * mean_pred_current

            if iter_count % iter_size == 0:  # step once every iter_size videos, matching the zero_grad condition above
                print("Train loss: %.4f" % (avg_loss_train / iter_size))
                avg_loss_train = 0
                optimizer.step()

        torch.save(model.state_dict(), "flow_%d.pth" % epoch)
        # iter_count += 1610
        # model.load_state_dict(torch.load("%d.pth" % iter_count))
        '''
        x = range(len(gt))
        plt.scatter(x, gt, color='g')
        plt.scatter(x, ans, color='r')
        plt.show()
        '''
        print("Epoch %d done !" % epoch)
Example #3
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader

# Project-specific names used below (UCFCrimeSlow, NoiseFilter, soft_uniform_sampling,
# SigmoidCrossEntropyLoss, SigmoidMAELoss, get_sample_index, sigmoid) are assumed to come
# from the repository's own modules.


def train_gcn(param):
    torch.cuda.empty_cache()
    # videos_pkl_train = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_train.pkl"
    videos_pkl_train = "/mmu_ssd/liuchang03/heyuwei/GCN-Anomaly-Detection/pygcn/my_crime_train.pkl"
    # videos_pkl_test = "/home/lnn/workspace/pygcn/pygcn/ucf_crime_test.pkl"
    # feature_folder = "/home/lnn/workspace/UCF_Crimes/c3d_features_all/_iter_4500/"
    feature_folder = "/mmu_ssd/liuchang03/heyuwei/Data/crime_c3d_feature"
    prediction_folder = "/mmu_ssd/liuchang03/heyuwei/Data/crime_c3d_score"
    # prediction_folder = "/home/lnn/workspace/UCF_Crimes/c3d_features_1_high_conf_sampling_0.6/_iter_1000/"
    test_pred_gt_folder = "/home/lnn/data/UCF_Crimes/test_pred_groundtruth/"
    modality = "c3d"
    gpu_id = 0
    iter_size = 32
    ucf_crime_train = UCFCrimeSlow(videos_pkl_train, prediction_folder, feature_folder, modality,
                               graph_generator=soft_uniform_sampling, graph_generator_param=param)
    train_loader = DataLoader(dataset=ucf_crime_train, batch_size=1, shuffle=False)
    model = NoiseFilter(nfeat=4096, nclass=1)
    criterion_supervised = SigmoidCrossEntropyLoss()
    criterion_unsupervised = SigmoidMAELoss()
    if gpu_id != -1:
        model = model.cuda(gpu_id)
        criterion_supervised = criterion_supervised.cuda(gpu_id)
        criterion_unsupervised = criterion_unsupervised.cuda(gpu_id)
    optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, weight_decay=0.0005)
    opt_scheduler = optim.lr_scheduler.StepLR(optimizer, 5, 0.1)
    iter_count = 0
    avg_loss_train = 0
    alpha = 0.5
    vid2mean_pred = {}
    for epoch in range(20):
        model.train()
        opt_scheduler.step()
        for step, data in enumerate(train_loader):
            (feat, adj, labeled_index_in_the_graph, labeled_index), pred, vid = data
            # print('feat shape ' + str(feat.shape))
            # print('adj shape ' + str(adj.shape))
            # print('labeled_index_in_the_graph: ' + str(labeled_index_in_the_graph))
            # print('labeled_index: ' + str(labeled_index))
            feat, adj, pred = Variable(feat), Variable(adj), Variable(pred)

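            # seed the per-video running-mean prediction with the initial noisy scores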
            if vid[0] not in vid2mean_pred:
                vid2mean_pred[vid[0]] = pred.data.cpu().numpy().flatten().copy()
            mean_pred = Variable(torch.from_numpy(vid2mean_pred[vid[0]]), requires_grad=False)

            if gpu_id != -1:
                feat = feat.cuda(gpu_id)
                adj = adj.cuda(gpu_id)
                pred = pred.cuda(gpu_id)
                mean_pred = mean_pred.cuda(gpu_id)

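            # gradient accumulation: gradients are cleared once every iter_size videos and applied below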
            if iter_count % iter_size == 0:
                optimizer.zero_grad()

            output = model(feat, adj)
            labeled_index_in_the_graph = np.array(labeled_index_in_the_graph).flatten()
            labeled_index = np.array(labeled_index).flatten()
            sample_index = get_sample_index(labeled_index, pred)

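            # normal videos: supervise every snippet against its classifier prediction;
            # anomalous videos: supervise only the labeled (high-confidence) snippets and
            # add a consistency term against the running-mean prediction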
            if "Normal" in vid[0]:
                loss_train = criterion_supervised(output.view(1, -1),
                                                  pred.view(1, -1)[:, range(output.shape[1])])
            else:
                # loss_train = criterion_supervised(output.view(1, -1)[:, labeled_index_in_the_graph],
                #                                   pred.view(1, -1)[:, labeled_index]) + \
                #              criterion_unsupervised(output.view(1, -1),
                #                                     mean_pred.view(1, -1)[:, sample_index])

                loss_train = criterion_supervised(output.view(1, -1)[:, labeled_index_in_the_graph].float(),
                                                  pred.view(1, -1)[:, labeled_index]).float() + \
                             criterion_unsupervised(output.view(1, -1).float(),
                                                    mean_pred.view(1, -1)[:, sample_index].float())

            avg_loss_train += loss_train.item()  # detach the scalar so the running sum does not hold onto graphs
            iter_count += 1
            loss_train.backward()

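            # write the new GCN scores back at the sampled positions, then update the
            # per-video mean prediction as an exponential moving average with weight alpha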
            mean_pred_current = mean_pred.data.cpu().numpy().copy().flatten()
            mean_pred_current[sample_index] = sigmoid(output).data.cpu().numpy().copy().flatten()
            vid2mean_pred[vid[0]] = alpha * vid2mean_pred[vid[0]] + (1 - alpha) * mean_pred_current

            if iter_count % iter_size == 0:  # step once every iter_size videos, matching the zero_grad condition above
                print("Train loss: %.4f" % (avg_loss_train / iter_size))
                avg_loss_train = 0
                optimizer.step()

        torch.save(model.state_dict(), "c3d_%d.pth" % epoch)
        # iter_count += 1610
        # model.load_state_dict(torch.load("%d.pth" % iter_count))

        
        print("Epoch %d done !" % epoch)