Example #1
def main():
    global args, start_loss, distance, best_prec1
    set_seed()
    args = parser.parse_args()

    # Multi-GPU training: select devices
    os.environ["CUDA_VISIBLE_DEVICES"] = '2,3'  # GPU IDs to use
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")  # visible GPUs are renumbered from 0
    # DDP
    torch.distributed.init_process_group(backend="nccl",
                                         init_method='tcp://localhost:23469',
                                         rank=0,
                                         world_size=1)

    print("load into device:", device)

    # init visual feature extractor
    featureExtractor = VGG_Face()
    featureExtractor = load_GPUS(
        featureExtractor,
        '/mnt/nfs-shared/xinda/Werewolf-XL/werewolf_video/vgg16_face_30.pth')
    featureExtractor = nn.DataParallel(featureExtractor)
    featureExtractor.to(device)
    print('Loaded visual feature extractor')

    # -----------------
    # Cross Validation
    # -----------------
    average_acc_test, average_f1_test, average_auc_test = [], [], []
    for i in range(4, 5):  # only fold 4 of the 10-fold CV is run here
        # Load Data
        train_dataset = PNN_TrainFileDataset.FlameSet_Train(i)  # training split (70%)
        valid_dataset = PNN_ValidFileDatabase.FlameSet_Valid(i)  # validation split
        test_dataset = PNN_TestFileDatabase.FlameSet_Test(i)  # test split (30%)

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.workers,
            pin_memory=False
        )  # pin_memory=True would copy tensors into CUDA pinned memory before returning them

        valid_loader = torch.utils.data.DataLoader(
            valid_dataset,
            batch_size=args.valid_batch_size,
            shuffle=False,
            num_workers=args.workers,
            pin_memory=False)

        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=args.test_batch_size,
            shuffle=False,
            num_workers=args.workers,
            pin_memory=False)
        # load LSTM model and wrap it for multi-GPU (DDP) training
        model = FineTuneLstmModel()

        model = model.to(device)
        model = nn.parallel.DistributedDataParallel(
            model, find_unused_parameters=True)

        # loss criterion and optimizer
        train_y = pd.read_csv(
            f'/mnt/nfs-shared/xinda/Werewolf-XL/Werewolf-XL_202106/2_LSTM/LSTM/Split_dataset/CV_Features/ClassificationFeatures/Train_CV_{i}.csv'
        ).iloc[:, 5]
        # fit a LabelEncoder to map the class labels in train_y to integers 0..5
        encoder = LabelEncoder().fit(train_y)
        y_train = encoder.transform(train_y)
        class_weights = torch.tensor(
            compute_class_weight('balanced', classes=np.unique(y_train),
                                 y=y_train)).float()
        print("Class Weight = ", class_weights)
        criterion = nn.CrossEntropyLoss(weight=class_weights)
        criterion = criterion.to(device)  # move the loss (and its class weights) to the device
        # optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

        # track average train/valid loss across epochs
        avg_train_loss = []
        avg_valid_loss = []

        # initialize the early_stopping object
        early_stopping = EarlyStopping(
            patience=args.patience,
            verbose=True,
            path=f"./checkpoints/20210628/early_stopping_checkpoint_CV_{i}.pt")

        # Training on epochs
        for epoch in range(args.start_epochs, args.epochs):

            # -------------------
            # train the model
            # -------------------
            avg_train_loss_item = train(train_loader,
                                        featureExtractor,
                                        model,
                                        criterion,
                                        optimizer,
                                        epoch,
                                        device,
                                        cv=i)
            avg_train_loss.append(avg_train_loss_item)

            # -------------------
            # Validate the model
            # -------------------
            if epoch % 5 == 0:
                loss = valid(valid_loader,
                             featureExtractor,
                             model,
                             criterion,
                             optimizer,
                             epoch,
                             device,
                             cv=i)
                avg_valid_loss.append(loss)
                # save model
                # torch.save(model, f'./checkpoints/20210628/CV_{i}_Classification_lstm_loss_' + str("%.4f" % loss) + '.pth')

                # early_stopping needs the validation loss to check if it has decreased,
                # and if it has, it will make a checkpoint of the current model
                early_stopping(loss, model)
                if early_stopping.early_stop:
                    print("Early stopping")
                    # torch.save(model, f'./checkpoints/CV_{i}_Classification_lstm_loss_{str("%.4f" % loss)}_earlyStopping_{epoch}.pth')
                    break

        # -------------------
        # Test the model on the early-stopping checkpoint
        # -------------------
        model.load_state_dict(
            torch.load(
                f"./checkpoints/20210628/early_stopping_checkpoint_CV_{i}.pt"))
        test_result = test(test_loader,
                           featureExtractor,
                           model,
                           epoch,
                           device,
                           cv=i)
        average_acc_test.append(test_result[0])
        average_f1_test.append(test_result[1])
        average_auc_test.append(test_result[2])
        print(
            f"CV {i}/10 >>>> LSTM Classification Average Results of Arousal: ACC. = {np.average(average_acc_test)},\
            F1 Score = {np.average(average_f1_test)}, AUC = {np.average(average_auc_test)}"
        )
        logger.info(
            f"CV {i}/10 LSTM Classification Average Results of Arousal: ACC. = {np.average(average_acc_test)},\
            F1 Score = {np.average(average_f1_test)}, AUC = {np.average(average_auc_test)}"
        )
Example #2

def main():
    global args, start_loss, distance, best_prec1
    set_seed()
    args = parser.parse_args()

    # Load Data
    test_dataset = PNN_TestFileDatabase.FlameSet()  # test split (30%)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=False)

    # Select GPU, load device
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # GPU IDs to use
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")  # visible GPUs are renumbered from 0
    print("load into device:", device)

    # init visual feature extractor
    featureExtractor = VGG_Face()
    featureExtractor = load_GPUS(
        featureExtractor,
        '/mnt/nfs-shared/xinda/Werewolf-XL/werewolf_video/vgg16_face_30.pth')
    featureExtractor = nn.DataParallel(featureExtractor)
    featureExtractor.to(device)
    print('Loaded visual feature extractor')

    # load the trained LSTM model
    model = FineTuneLstmModel()
    # model_CKPT = torch.load('/home/xinda/werewolf_lstm/werewolf_video/PNN_saved_model/PNN_LSTM_50.pth')
    model_CKPT = torch.load(
        '/mnt/nfs-shared/xinda/Werewolf-XL/werewolf-XL_202103/Classification/checkpoints/PNN_LSTM_2layer_params_25_69.9546.pth'
    )
    model.load_state_dict(model_CKPT)
    model.to(device)
    model.eval()

    with torch.no_grad():
        groundTruth = []
        prediction_max = []
        prediction_prob = []
        test_correct = 0
        test_total = 0
        for i, videoFrames in enumerate(tqdm(test_loader)):
            label = videoFrames['label'].to(device)
            videoFrames = torch.squeeze(videoFrames['videoFrames']).to(device)
            length = videoFrames.shape[0]
            Outputs = []

            if length < 16:
                lack = 16 - length
                repeat_frames = videoFrames[-1:, ...].repeat(lack, 1, 1, 1)
                videoFrames = torch.cat((videoFrames, repeat_frames), 0)
                length = videoFrames.shape[0]  # update length after padding

            # slide a 16-frame window with stride 8 over the clip
            circle = int(length / 8) - 1
            for k in range(circle):
                start = 0 + 8 * k
                end = 16 + 8 * k
                features = featureExtractor(videoFrames[start:end,
                                                        ...].float())
                output, hidden = model(features.unsqueeze(0))
                output_mean = torch.mean(output,
                                         dim=0)  # mean over one 16-frame window
                Outputs.append(output_mean.data.cpu().numpy().tolist()
                               )  # collect every window's output

            Outputs = torch.Tensor(Outputs)

            if Outputs.shape[0] > 1:
                outputs_average = torch.mean(Outputs, dim=0).unsqueeze(
                    0)  # average the outputs of all windows
            else:
                outputs_average = Outputs

            groundTruth.append(label.item())
            _, predicted = torch.max(outputs_average.data, 1)
            prediction_max.append(predicted.item())
            prediction_prob_b = F.softmax(outputs_average, dim=1)
            prediction_prob.append(
                prediction_prob_b.data.numpy().reshape(6).tolist())

            test_total += label.size(0)
            test_correct += (predicted == label.data.cpu()).sum().item()

            if i % 500 == 0:
                print("ground truth.length =", len(groundTruth))
                print("prediction.length =", len(prediction_max))

                accuracy = accuracy_score(prediction_max, groundTruth)
                print("accuracy = ", accuracy)
                f1 = f1_score(prediction_max, groundTruth, average="weighted")
                print("f1", f1)
                y_bin = label_binarize(groundTruth, classes=list(range(6)))
                print("AUC = ",
                      roc_auc_score(y_bin, prediction_prob, average='micro'))

                test_accuracy = 100 * test_correct / test_total

                print('Accuracy of the network on the test images: %.2f%%' %
                      test_accuracy)

        accuracy = accuracy_score(prediction_max, groundTruth)
        print("accuracy = ", accuracy)
        f1 = f1_score(prediction_max, groundTruth, average="weighted")
        print("f1 =", f1)
        y_bin = label_binarize(groundTruth, classes=list(range(6)))
        print("AUC = ", roc_auc_score(y_bin, prediction_prob, average='micro'))

        test_accuracy = 100 * test_correct / test_total

        print('Accuracy of the network on the test images: %.2f%%' %
              test_accuracy)
        df = pd.DataFrame(data={
            "pnn_prediction": prediction_max,
            "pnn_groundtruth": groundTruth
        })
        df.to_csv("eval_pnn.csv")

        pro = np.array(prediction_prob)
        df2 = pd.DataFrame(pro)
        df2.to_csv(
            "/mnt/nfs-shared/xinda/Werewolf-XL/werewolf-XL_202103/Classification/prediction_result/categorical_lstm_6pnn_202103.csv"
        )
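
Example #2's inference slides a 16-frame window with stride 8 over each clip and averages the per-window logits. A standalone sketch of that windowing on dummy tensors (the frame shape and the 6-class output are assumptions, and random logits stand in for the real extractor + LSTM):

import torch

frames = torch.randn(21, 3, 224, 224)  # hypothetical clip of 21 RGB frames
WIN, STRIDE = 16, 8

# pad short clips by repeating the last frame, as the example does
if frames.shape[0] < WIN:
    pad = frames[-1:].repeat(WIN - frames.shape[0], 1, 1, 1)
    frames = torch.cat((frames, pad), 0)

length = frames.shape[0]
window_logits = []
for k in range((length - WIN) // STRIDE + 1):
    window = frames[k * STRIDE:k * STRIDE + WIN]  # shape (16, 3, 224, 224)
    # a real pipeline would run featureExtractor + the LSTM on `window`
    window_logits.append(torch.randn(6))

clip_logits = torch.stack(window_logits).mean(dim=0)  # average over windows
print("predicted class:", clip_logits.argmax().item())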
Example #3
    torch.distributed.init_process_group(backend="nccl",
                                         init_method='tcp://localhost:23468',
                                         rank=0,
                                         world_size=1)

    print("load into device:", device)

    # init visual feature extractor
    featureExtractor = VGG_Face()
    featureExtractor = load_GPUS(
        featureExtractor,
        '/mnt/nfs-shared/xinda/Werewolf-XL/werewolf_video/vgg16_face_30.pth')
    featureExtractor = nn.DataParallel(featureExtractor)
    featureExtractor.to(device)
    print('Loaded visual feature extractor')

    test_dataset = PNN_TestFileDatabase.FlameSet_Test(4)  # test split (30%)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=False)
    # load LSTM model and wrap it with DDP
    model = FineTuneLstmModel()
    model = model.to(device)
    model = nn.parallel.DistributedDataParallel(model,
                                                find_unused_parameters=True)
    model.load_state_dict(
        torch.load("./checkpoints/CV_4_Classification_lstm_loss_1.3821.pth"))
    test_result = test(test_loader, featureExtractor, model, 999, device, cv=4)
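
load_GPUS is not shown in these examples; from its use it evidently loads weights saved from a multi-GPU model into a bare module. A hedged sketch of the usual pattern, stripping the "module." prefix that DataParallel/DistributedDataParallel prepend to parameter names (the helper name and path below are illustrative, not from the original code):

from collections import OrderedDict

import torch

def strip_module_prefix(state_dict):
    # drop the "module." prefix added by (Distributed)DataParallel
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        new_key = key[len("module."):] if key.startswith("module.") else key
        cleaned[new_key] = value
    return cleaned

# usage (path is illustrative):
# raw = torch.load("vgg16_face_30.pth", map_location="cpu")
# model.load_state_dict(strip_module_prefix(raw))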
Example #4
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # visible GPUs are renumbered from 0
    # DDP
    torch.distributed.init_process_group(backend="nccl", init_method='tcp://localhost:23468', rank=0, world_size=1)

    print("load into device:", device)

    # init visual feature extractor
    featureExtractor = VGG_Face()
    featureExtractor = load_GPUS(featureExtractor, '/mnt/nfs-shared/xinda/Werewolf-XL/werewolf_video/vgg16_face_30.pth')
    featureExtractor = nn.DataParallel(featureExtractor)
    featureExtractor.to(device)
    print('Loaded visual feature extractor')


    # load LSTM model and wrap it with DDP
    model = FineTuneLstmModel()
    model = model.to(device)
    model = nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)

    average_acc_test, average_f1_test, average_auc_test = [], [], []
    for i in range(1, 11):  # evaluate all 10 CV folds
        test_dataset = PNN_TestFileDatabase.FlameSet_Test(i)  # for test  0.3
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=args.test_batch_size, shuffle=False,
                                                  num_workers=args.workers, pin_memory=False)

        model.load_state_dict(torch.load(f"./checkpoints/early_stopping_checkpoint_CV_{i}.pt"))
        test_result = test(test_loader, featureExtractor, model, 629, device, cv=i)
        average_acc_test.append(test_result[0])
        average_f1_test.append(test_result[1])
        average_auc_test.append(test_result[2])
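
Example #4 averages the per-fold metrics with np.average; for the AUC term, Example #2 suggests test computes a micro-averaged multiclass AUC via label_binarize. A small sketch of that metric on toy data (labels and probabilities are fabricated for illustration):

import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize

ground_truth = [0, 2, 1, 5, 3, 4]  # toy labels over 6 classes
probs = np.random.dirichlet(np.ones(6), size=6)  # toy per-class probabilities

y_bin = label_binarize(ground_truth, classes=list(range(6)))
print("micro AUC =", roc_auc_score(y_bin, probs, average="micro"))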