def get_dataset(configs):
    """Build the raw FER2013 train/val/test datasets.

    The test split is wrapped with test-time augmentation (10 crops);
    returns a (train_set, val_set, test_set) tuple.
    """
    from utils.datasets.fer2013dataset import fer2013

    # TODO: add transform
    splits = (
        fer2013('train', configs),
        fer2013('val', configs),
        fer2013('test', configs, tta=True, tta_size=10),
    )
    return splits
# --- Example #2 (scrape separator; commented out — the bare name "예제" would raise NameError at import) ---
def main():
    """Dump misclassified FER2013 test images to ./wrong_in_fer/.

    Loads a ResMasking checkpoint, runs TTA inference over the test set,
    and for every sample whose aggregated prediction disagrees with the
    label, writes the un-augmented image as "<true>-><pred>_<idx>.png".

    NOTE(review): relies on module-level names defined outside this block
    (checkpoint_name, class_names, fer2013, make_batch, tqdm, json,
    torch, F, np, cv2) — confirm against the full file.
    """
    with open("./configs/fer2013_config.json") as f:
        configs = json.load(f)

    state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

    from models import resmasking_dropout1

    model = resmasking_dropout1(in_channels=3, num_classes=7).cuda()
    model.load_state_dict(state["net"])
    model.eval()

    # The plain (tta=False) set mirrors the TTA set index-for-index and
    # provides the original image to save when a prediction is wrong.
    test_set = fer2013("test", configs, tta=True, tta_size=8)
    hold_test_set = fer2013("test", configs, tta=False, tta_size=0)

    with torch.no_grad():
        for idx in tqdm(range(len(test_set)), total=len(test_set),
                        leave=False):
            images, targets = test_set[idx]

            images = make_batch(images)
            images = images.cuda(non_blocking=True)

            outputs = model(images).cpu()
            outputs = F.softmax(outputs, 1)

            # Aggregate TTA crops: outputs.shape [tta_size, 7] ->
            # sum over crops, then argmax over classes.
            outputs = torch.sum(outputs, 0)
            outputs = torch.argmax(outputs, 0)
            outputs = outputs.item()
            targets = targets.item()
            if outputs != targets:
                image, target = hold_test_set[idx]
                # CHW float tensor in [0, 1] -> HWC uint8 for cv2.
                image = image.permute(1, 2, 0).numpy() * 255
                image = image.astype(np.uint8)

                cv2.imwrite(
                    "./wrong_in_fer/{}->{}_{}.png".format(
                        class_names[target], class_names[outputs], idx),
                    image,
                )
def get_dataset(configs):
    """Build raw train/val/test datasets for the configured dataset.

    Dispatches on the module-level ``dataset_name``: "fer2013" selects the
    FER2013 loader, anything else the video loader. The test split uses
    test-time augmentation with 10 crops.
    """
    from utils.datasets.fer2013dataset import fer2013
    from utils.datasets.videodataset import video

    loader = fer2013 if dataset_name == "fer2013" else video
    train_set = loader("train", configs)
    val_set = loader("val", configs)
    test_set = loader("test", configs, tta=True, tta_size=10)
    return train_set, val_set, test_set
def main():
    """Evaluate every checkpoint in ``model_dict`` on the FER2013 test set
    with TTA and save one normalized confusion-matrix PDF per model.

    NOTE(review): relies on module-level names defined outside this block
    (model_dict, models, fer2013, make_batch, class_names,
    plot_confusion_matrix, json, torch, F, np, tqdm, confusion_matrix,
    plt) — confirm against the full file.
    """
    with open('./configs/fer2013_config.json') as f:
        configs = json.load(f)

    for title_name, model_name, checkpoint_path in model_dict:
        state = torch.load('./saved/checkpoints/{}'.format(checkpoint_path))
        model = getattr(models, model_name)
        model = model(in_channels=3, num_classes=7)
        model.load_state_dict(state['net'])
        model.cuda()
        model.eval()
        all_target = []
        all_output = []
        test_set = fer2013('test', configs, tta=True, tta_size=8)

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                images, targets = test_set[idx]

                images = make_batch(images)
                images = images.cuda(non_blocking=True)

                outputs = model(images).cpu()
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]: sum over crops, then argmax.
                outputs = torch.sum(outputs, 0)
                outputs = torch.argmax(outputs, 0)
                outputs = outputs.item()
                targets = targets.item()

                all_target.append(targets)
                all_output.append(outputs)
        all_target = np.array(all_target)
        all_output = np.array(all_output)
        matrix = confusion_matrix(all_target, all_output)
        np.set_printoptions(precision=2)
        plot_confusion_matrix(
            matrix,
            classes=class_names,
            normalize=True,
            title=title_name)
        # BUG FIX: was ``checkpoint_name`` (not defined in this function;
        # the loop variable is ``checkpoint_path``) — each model now gets
        # its own, correctly-named output file.
        plt.savefig('./saved/cm/cm_{}.pdf'.format(checkpoint_path))
        plt.close()
# --- Example #5 (scrape separator; commented out — the bare name "예제" would raise NameError at import) ---
def main():
    """
    Save test-set targets (and, were it enabled, predictions) per checkpoint.

    NOTE(review): every inference line is commented out, so
    ``prediction_list`` stays empty — an EMPTY array is written for each
    checkpoint, and ``test_targets.npy`` is rewritten once per checkpoint
    with the same targets. Relies on module-level names defined outside
    this block (json, fer2013, model_dict, resmasking_dropout1, torch,
    tqdm, os, np) — confirm against the full file.
    """
    with open("../configs/fer2013_config.json") as f:
        configs = json.load(f)

    test_set = fer2013("test", configs, tta=True, tta_size=8)

    for model_name, checkpoint_path in model_dict:
        prediction_list = []  # each item is 7-ele array
        test_target = []

        print("Processing", checkpoint_path)
        # if os.path.exists("../saved/results/{}.npy".format(checkpoint_path)):
        #     continue
        model = resmasking_dropout1(in_channels=3, num_classes=7)
        # else:
        #     model = vgg19(in_channels=3, num_classes=7)

        state = torch.load(
            os.path.join("../saved/checkpoints", checkpoint_path))
        model.load_state_dict(state["net"])

        model.cuda()
        model.eval()

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                images, targets = test_set[idx]
                # images = make_batch(images)
                # images = images.cuda(non_blocking=True)

                # outputs = model(images).cpu()
                # outputs = F.softmax(outputs, 1)
                # outputs = torch.sum(outputs, 0)  # outputs.shape [tta_size, 7]

                # outputs = [round(o, 4) for o in outputs.numpy()]
                # prediction_list.append(outputs)
                test_target.append(targets)

        # NOTE(review): prediction_list is always empty here (see above).
        np.save("../saved/results/{}.npy".format(checkpoint_path),
                prediction_list)

        np.save("../saved/results/test_targets.npy", test_target)
# --- Example #6 (scrape separator; commented out — the bare name "예제" would raise NameError at import) ---
def main():
    """Run TTA inference for every model in ``model_dict`` and cache each
    model's per-sample summed softmax scores as a .npy file under
    ./saved/results/ (skipping checkpoints already cached).
    """
    with open('./configs/fer2013_config.json') as f:
        configs = json.load(f)

    test_set = fer2013('test', configs, tta=True, tta_size=8)

    for model_name, checkpoint_path in model_dict:
        print("Processing", checkpoint_path)
        # Skip checkpoints whose results were already cached.
        if os.path.exists('./saved/results/{}.npy'.format(checkpoint_path)):
            continue

        net = getattr(models, model_name)(in_channels=3, num_classes=7)
        state = torch.load(os.path.join('saved/checkpoints', checkpoint_path))
        net.load_state_dict(state['net'])
        net.cuda()
        net.eval()

        prediction_list = []  # each item is 7-ele array
        with torch.no_grad():
            for sample_idx in tqdm(range(len(test_set)),
                                   total=len(test_set),
                                   leave=False):
                images, _ = test_set[sample_idx]
                batch = make_batch(images).cuda(non_blocking=True)

                scores = F.softmax(net(batch).cpu(), 1)
                scores = torch.sum(scores, 0)  # scores.shape [7] after TTA sum

                prediction_list.append([round(o, 4) for o in scores.numpy()])

        np.save('./saved/results/{}.npy'.format(checkpoint_path),
                prediction_list)
def main():
    """Evaluate the Residual Masking checkpoint on the FER2013 test set
    (TTA, 8 crops) and save a normalized confusion-matrix PDF.

    Removed dead code: ``acc``/``correct``/``total`` were accumulated but
    the accuracy print was commented out, so they were never read.

    NOTE(review): relies on module-level names defined outside this block
    (checkpoint_name, fer2013, make_batch, class_names,
    plot_confusion_matrix, json, torch, F, np, tqdm, confusion_matrix,
    plt) — confirm against the full file.
    """
    with open("./configs/fer2013_config.json") as f:
        configs = json.load(f)

    state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

    from models import resmasking_dropout1

    model = resmasking_dropout1(in_channels=3, num_classes=7).cuda()
    model.load_state_dict(state["net"])
    model.eval()

    all_target = []
    all_output = []

    test_set = fer2013("test", configs, tta=True, tta_size=8)

    with torch.no_grad():
        for idx in tqdm(range(len(test_set)), total=len(test_set), leave=False):
            images, targets = test_set[idx]

            images = make_batch(images)
            images = images.cuda(non_blocking=True)

            outputs = model(images).cpu()
            outputs = F.softmax(outputs, 1)

            # outputs.shape [tta_size, 7]: sum over crops, then argmax.
            outputs = torch.sum(outputs, 0)
            outputs = torch.argmax(outputs, 0)

            all_target.append(targets.item())
            all_output.append(outputs.item())

    all_target = np.array(all_target)
    all_output = np.array(all_output)

    matrix = confusion_matrix(all_target, all_output)
    np.set_printoptions(precision=2)

    plot_confusion_matrix(
        matrix,
        classes=class_names,
        normalize=True,
        title="Residual Masking Network",
    )

    plt.savefig("./saved/cm/cm_{}.pdf".format(checkpoint_name))
    plt.close()
    print("save at ./saved/cm/cm_{}.pdf".format(checkpoint_name))
# --- Example #8 (scrape separator; commented out — the bare name "예제" would raise NameError at import) ---
def main():
    """K-fold evaluation of a VGG19 checkpoint on the configured dataset.

    For each fold: load the fold's checkpoint, run TTA inference over the
    test split (FER2013 or video), then log a confusion matrix and a
    classification report; for video data in best-checkpoint mode, also
    log a per-image true-vs-predicted table.

    NOTE(review): relies on many module-level names defined outside this
    block (dataset_name, best_checkpoint_selection, check_name, test_name,
    check_test_name, cm_normalization, class_names, Log, PrettyTable,
    fer2013, video, make_batch, plot_confusion_matrix, json, torch, F,
    np, pd, os, tqdm, confusion_matrix, classification_report) — confirm
    against the full file.
    """
    with open("./configs/" + dataset_name + "_config.json") as f:
        configs = json.load(f)
    data_path = configs["data_path"]

    for i in range(0, configs["k-fold"]):
        # Single-fold best-checkpoint mode uses the checkpoint name as-is;
        # otherwise the fold index is substituted into the name template.
        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            checkpoint_name = check_name
        else:
            checkpoint_name = check_name.format(i + 1)

        check_log_name = checkpoint_name + "_" + test_name

        # Point the config at this fold's data directory.
        configs["data_path"] = data_path + "/fold_" + str(i + 1)

        if dataset_name == "video":
            # Keep the image names so predictions can be tabulated below.
            data_vectors = pd.read_csv(
                os.path.join(configs["data_path"], "test.csv"))
            image_name_vector = data_vectors["image_name"].tolist()

        acc = 0.0
        state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

        from models import vgg19

        model = vgg19

        model = model(in_channels=3, num_classes=7).cuda()
        model.load_state_dict(state["net"])
        model.eval()

        correct = 0
        total = 0
        all_target = []
        all_output = []

        if dataset_name == "fer2013":
            test_set = fer2013("test", configs, tta=True, tta_size=10)
            # test_set = fer2013('test', configs, tta=False, tta_size=0)
        else:
            test_set = video("test", configs, tta=True, tta_size=10)

        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            # NOTE(review): this .format(i + 1) fills no placeholder — harmless
            # but probably a copy-paste leftover from the branch below.
            print("Testing on private test with tta..".format(i + 1))
        else:
            print("Testing fold {} on private test with tta..".format(i + 1))

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                images, targets = test_set[idx]

                images = make_batch(images)
                images = images.cuda(non_blocking=True)

                outputs = model(images).cpu()
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]: sum over TTA crops, argmax class.
                outputs = torch.sum(outputs, 0)
                outputs = torch.argmax(outputs, 0)
                outputs = outputs.item()
                targets = targets.item()
                total += 1
                correct += outputs == targets

                all_target.append(targets)
                all_output.append(outputs)

                # if len(np.unique(all_target)) == 7:
                #     break
                # if idx == 10:
                #     break

        # Accuracy is computed but only used in commented-out reporting below.
        acc = 100. * correct / total
        #print("Accuracy on private test with tta: {:.3f}".format(acc))

        all_target = np.array(all_target)
        all_output = np.array(all_output)

        matrix = confusion_matrix(all_target, all_output)
        np.set_printoptions(precision=2)

        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            validated = "1"
        else:
            validated = "all_results"

        log_name = check_test_name.format(validated)

        if not best_checkpoint_selection == 1 and configs[
                "k-fold"] != 1 and cm_normalization == True:
            Log("Test fold {}\n\n".format(i + 1),
                "./saved/results/{}.txt".format(log_name))

        # plt.figure(figsize=(5, 5))
        plot_confusion_matrix(
            matrix,
            classes=class_names,
            normalize=cm_normalization,
            # title='{} \n Accuracc: {:.03f}'.format(checkpoint_name, acc)
            title="Vgg19",
            log_name=log_name,
            check_log_name=check_log_name)

        class_report = classification_report(all_target,
                                             all_output,
                                             target_names=class_names)
        print("Classification report")
        print(class_report)
        Log("\n\nClassification report\n",
            "./saved/results/{}.txt".format(log_name))
        Log(class_report, "./saved/results/{}.txt".format(log_name))
        Log("\n\n", "./saved/results/{}.txt".format(log_name))

        if dataset_name == "video" and best_checkpoint_selection == 1:
            # Build a true-vs-predicted row pair keyed by image name.
            t_image_name_vector = ["Image names"]
            t_all_target = ["true"]
            t_all_output = ["predicted"]
            for j in range(0, len(image_name_vector)):
                t_image_name_vector.append(image_name_vector[j])
                t_all_target.append(str(all_target[j]))
                t_all_output.append(str(all_output[j]))
            t = PrettyTable(t_image_name_vector)
            t.add_row(t_all_target)
            t.add_row(t_all_output)
            print("Image predictions")
            print(t)
            Log("Image predictions\n",
                "./saved/results/{}.txt".format(log_name))
            Log(t, "./saved/results/{}.txt".format(log_name))
            Log("\n\n", "./saved/results/{}.txt".format(log_name))