Example #1
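Ensemble inference over the DFDC test videos, CPU-only: a RetinaFace detector extracts face crops, three classifiers (a plain Xception plus WS-DAN models on Xception and EfficientNet backbones) score each frame, and the weighted ensemble scores are written to submission.csv.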
import torch
import torch.nn.functional as F
import torchvision.transforms as T

# FaceDetector, DFDCLoader, xception and WSDAN come from the solution's own
# modules; their import paths depend on the project layout.


def main():
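    # Inference only: disable autograd. (cudnn.benchmark is a no-op in this
    # CPU-only variant but mirrors the GPU version below.)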
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    test_dir = "./input/deepfake-detection-challenge/test_videos"
    csv_path = "./input/deepfake-detection-challenge/sample_submission.csv"

    face_detector = FaceDetector()
    face_detector.load_checkpoint(
        "./input/dfdc-pretrained-2/RetinaFace-Resnet50-fixed.pth")
    loader = DFDCLoader(test_dir, face_detector, T.ToTensor())

    model1 = xception(num_classes=2, pretrained=False)
    ckpt = torch.load("./input/dfdc-pretrained-2/xception-hg-2.pth",
                      map_location=torch.device('cpu'))
    model1.load_state_dict(ckpt["state_dict"])
    model1 = model1.cpu()
    model1.eval()

    model2 = WSDAN(num_classes=2, M=8, net="xception", pretrained=False).cpu()
    ckpt = torch.load("./input/dfdc-pretrained-2/ckpt_x.pth",
                      map_location=torch.device('cpu'))
    model2.load_state_dict(ckpt["state_dict"])
    model2.eval()

    model3 = WSDAN(num_classes=2, M=8, net="efficientnet",
                   pretrained=False).cpu()
    ckpt = torch.load("./input/dfdc-pretrained-2/ckpt_e.pth",
                      map_location=torch.device('cpu'))
    model3.load_state_dict(ckpt["state_dict"])
    model3.eval()

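    # Per-channel normalization statistics used by both WS-DAN models.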
    zhq_nm_avg = torch.Tensor([.4479, .3744, .3473]).view(1, 3, 1, 1).cpu()
    zhq_nm_std = torch.Tensor([.2537, .2502, .2424]).view(1, 3, 1, 1).cpu()

    for batch in loader:
        batch = batch.cpu()

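        # Model 1 (Xception): 299x299 inputs scaled to [-1, 1].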
        i1 = F.interpolate(batch, size=299, mode="bilinear")
        i1.sub_(0.5).mul_(2.0)
        o1 = model1(i1).softmax(-1)[:, 1].cpu().numpy()

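        # Model 2 (WS-DAN, Xception backbone): mean/std-normalized inputs.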
        i2 = (batch - zhq_nm_avg) / zhq_nm_std
        o2, _, _ = model2(i2)
        o2 = o2.softmax(-1)[:, 1].cpu().numpy()

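        # Model 3 (WS-DAN, EfficientNet backbone): same normalization at 300x300.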
        i3 = F.interpolate(i2, size=300, mode="bilinear")
        o3, _, _ = model3(i3)
        o3 = o3.softmax(-1)[:, 1].cpu().numpy()

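        # Fixed-weight average of the three per-frame fake probabilities.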
        out = 0.2 * o1 + 0.7 * o2 + 0.1 * o3
        loader.feedback(out)

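    # Copy the sample-submission header, then write one score per video.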
    with open(csv_path) as fin, open("submission.csv", "w") as fout:
        fout.write(next(fin))
        for line in fin:
            fname = line.split(",", 1)[0]
            pred = loader.score[fname]
            print("%s,%.6f" % (fname, pred), file=fout)
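The loader's feedback/score protocol is only implied above: DFDCLoader yields
batches of face frames, receives the per-frame probabilities back through
feedback(), and exposes one aggregated score per video via the score dict. A
minimal sketch of that contract (the class name, bookkeeping, and mean-based
reduction are assumptions, not the solution's actual implementation):

class ScoreAggregator:
    """Hypothetical stand-in for DFDCLoader's scoring side."""

    def __init__(self):
        self.score = {}     # video filename -> aggregated fake probability
        self._pending = []  # (filename, n_frames) for the batch just yielded

    def feedback(self, probs):
        # probs holds the ensemble outputs for the current batch, in the
        # order the frames were yielded.
        offset = 0
        for fname, n_frames in self._pending:
            frames = probs[offset:offset + n_frames]
            self.score[fname] = float(frames.mean())  # assumed reduction
            offset += n_frames
        self._pending = []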
Example #2
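The same ensemble adapted for a local GPU run: Kaggle paths are replaced by local checkpoints and a sample-video directory, models and batches move to CUDA, and the output additionally records per-video inference start/end times in tab-separated form.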
# Imports as in Example #1.
def main():
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    # test_dir = "../input/deepfake-detection-challenge/test_videos"
    # csv_path = "../input/deepfake-detection-challenge/sample_submission.csv"
    test_dir = "./data/train_sample_videos"
    csv_path = "./dfdc-data.csv"

    face_detector = FaceDetector()
    # face_detector.load_checkpoint("../input/dfdc-pretrained-2/RetinaFace-Resnet50-fixed.pth")
    face_detector.load_checkpoint("./RetinaFace-Resnet50-fixed.pth")
    loader = DFDCLoader(test_dir, face_detector, T.ToTensor())

    model1 = xception(num_classes=2, pretrained=False)
    # ckpt = torch.load("../input/dfdc-pretrained-2/xception-hg-2.pth")
    ckpt = torch.load("./xception-hg-2.pth")
    model1.load_state_dict(ckpt["state_dict"])
    model1 = model1.cuda()
    model1.eval()

    model2 = WSDAN(num_classes=2, M=8, net="xception", pretrained=False).cuda()
    # ckpt = torch.load("../input/dfdc-pretrained-2/ckpt_x.pth")
    ckpt = torch.load("./ckpt_x.pth")
    model2.load_state_dict(ckpt["state_dict"])
    model2.eval()

    model3 = WSDAN(num_classes=2, M=8, net="efficientnet",
                   pretrained=False).cuda()
    # ckpt = torch.load("../input/dfdc-pretrained-2/ckpt_e.pth")
    ckpt = torch.load("./ckpt_e.pth")
    model3.load_state_dict(ckpt["state_dict"])
    model3.eval()

    zhq_nm_avg = torch.Tensor([.4479, .3744, .3473]).view(1, 3, 1, 1).cuda()
    zhq_nm_std = torch.Tensor([.2537, .2502, .2424]).view(1, 3, 1, 1).cuda()

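    # non_blocking=True overlaps the host-to-device copy with compute when
    # the loader provides pinned-memory batches.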
    for batch in loader:
        batch = batch.cuda(non_blocking=True)
        i1 = F.interpolate(batch, size=299, mode="bilinear")
        i1.sub_(0.5).mul_(2.0)
        o1 = model1(i1).softmax(-1)[:, 1].cpu().numpy()

        i2 = (batch - zhq_nm_avg) / zhq_nm_std
        o2, _, _ = model2(i2)
        o2 = o2.softmax(-1)[:, 1].cpu().numpy()

        i3 = F.interpolate(i2, size=300, mode="bilinear")
        o3, _, _ = model3(i3)
        o3 = o3.softmax(-1)[:, 1].cpu().numpy()

        out = 0.2 * o1 + 0.7 * o2 + 0.1 * o3
        loader.feedback(out)

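    # Tab-separated output: video stem, fake probability, and inference
    # start/end times for each processed video.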
    with open("submission.csv", "w") as fout:
        for fname in loader.file_list:
            pred = loader.score[fname]
            start_time = loader.infer_start[fname]
            end_time = loader.infer_end[fname]
            print("%s\t%.6f\t%d\t%d" %
                  (fname.split('.')[0], pred, start_time, end_time),
                  file=fout)
Example #3
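Training loop for the standalone Xception classifier: SGD with momentum and step LR decay, optional resume from a checkpoint, multi-GPU training via nn.DataParallel, and per-epoch validation with best-checkpoint tracking.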
import os
import shutil

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# DFDCDataset, xception, save_checkpoint, the train/val transforms and the
# config object come from the solution's own modules.


def main():
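    # Autotune cuDNN conv kernels; this pays off when input sizes are fixed.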
    torch.backends.cudnn.benchmark = True

    train_dataset = DFDCDataset(config.data_list,
                                "train",
                                config.data_root,
                                transform=train_transform)
    val_dataset = DFDCDataset(config.data_list,
                              "val",
                              config.data_root,
                              transform=val_transform,
                              stable=True)

    kwargs = dict(batch_size=config.batch_size,
                  num_workers=config.num_workers,
                  shuffle=True,
                  pin_memory=True)
    train_loader = DataLoader(train_dataset, **kwargs)
    val_loader = DataLoader(val_dataset, **kwargs)
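    # Note: the shared kwargs shuffle the validation loader too; validation
    # metrics are order-independent, so this is harmless.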

    # Model initialization
    model = xception(num_classes=2, pretrained=None)

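    # Optionally resume: the checkpoint stores bare (non-DataParallel) keys,
    # so it is loaded before the model is wrapped below.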
    if hasattr(config, "resume") and os.path.isfile(config.resume):
        ckpt = torch.load(config.resume, map_location="cpu")
        start_epoch = ckpt.get("epoch", 0)
        best_acc = ckpt.get("acc1", 0.0)
        model.load_state_dict(ckpt["state_dict"])
    else:
        start_epoch = 0
        best_acc = 0.0

    model = model.cuda()
    model = nn.DataParallel(model)

    criterion = nn.CrossEntropyLoss()
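    # SGD at LR 0.01, decayed by 5x (gamma=0.2) every 2 epochs.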
    optimizer = optim.SGD(model.parameters(),
                          0.01,
                          momentum=0.9,
                          weight_decay=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.2)

    os.makedirs(config.save_dir, exist_ok=True)

    for epoch in range(config.n_epoches):
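        # Fast-forward the LR schedule over epochs already covered by the
        # resumed checkpoint.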
        if epoch < start_epoch:
            scheduler.step()
            continue

        print("Epoch {}".format(epoch + 1))

        model.train()

        loss_record = []
        acc_record = []

        for count, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_loss = loss.item()
            loss_record.append(iter_loss)

            preds = torch.argmax(outputs.detach(), 1)
            iter_acc = torch.sum(preds == labels).item() / len(preds)
            acc_record.append(iter_acc)

            if count and count % 100 == 0:
                print("T-Iter %d: loss=%.4f, acc=%.4f" %
                      (count, iter_loss, iter_acc))

        epoch_loss = np.mean(loss_record)
        epoch_acc = np.mean(acc_record)
        print("Training: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))

        model.eval()
        loss_record = []
        acc_record = []

        with torch.no_grad():
            for count, (inputs, labels) in enumerate(val_loader):
                inputs = inputs.cuda(non_blocking=True)
                labels = labels.cuda(non_blocking=True)

                outputs = model(inputs)
                loss = criterion(outputs, labels)

                iter_loss = loss.item()
                loss_record.append(iter_loss)

                preds = torch.argmax(outputs, 1)
                iter_acc = torch.sum(preds == labels).item() / len(preds)
                acc_record.append(iter_acc)

                if count and count % 100 == 0:
                    print("V-Iter %d: loss=%.4f, acc=%.4f" %
                          (count, iter_loss, iter_acc))

            epoch_loss = np.mean(loss_record)
            epoch_acc = np.mean(acc_record)
            print("Validation: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))

            scheduler.step()
            ckpt_path = os.path.join(config.save_dir, "ckpt-%d.pth" % epoch)
            # Save bare weights (model.module) so the checkpoint keys match
            # what the resume branch expects before DataParallel wrapping.
            save_checkpoint(ckpt_path,
                            model.module.state_dict(),
                            epoch=epoch + 1,
                            acc1=epoch_acc)

            if epoch_acc > best_acc:
                print("Best accuracy!")
                shutil.copy(ckpt_path, os.path.join(config.save_dir,
                                                    "best.pth"))
                best_acc = epoch_acc

            print()
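save_checkpoint is called above but not shown. A minimal sketch consistent
with how the checkpoint is written and read back, reusing the torch import
above (the helper body itself is an assumption):

def save_checkpoint(path, state_dict, **meta):
    # Hypothetical helper: bundles the weights under "state_dict" together
    # with any metadata keywords (epoch, acc1, ...) that the resume branch
    # reads back via ckpt.get(...).
    payload = {"state_dict": state_dict}
    payload.update(meta)
    torch.save(payload, path)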