def main():
    """Evaluate each checkpoint listed in ``model_dict`` on the FER2013
    private test set with 8-way TTA and save one normalized
    confusion-matrix plot per model under ``./saved/cm/``.

    Relies on module-level globals: ``model_dict`` (iterable of
    (title, model-fn name, checkpoint file) triples), ``models``,
    ``fer2013``, ``make_batch``, ``class_names``, ``confusion_matrix``
    and ``plot_confusion_matrix``.
    """
    with open('./configs/fer2013_config.json') as f:
        configs = json.load(f)

    for title_name, model_name, checkpoint_path in model_dict:
        # Restore the trained weights for this architecture.
        state = torch.load('./saved/checkpoints/{}'.format(checkpoint_path))
        model = getattr(models, model_name)
        model = model(in_channels=3, num_classes=7)
        model.load_state_dict(state['net'])
        model.cuda()
        model.eval()

        correct = 0
        total = 0
        all_target = []
        all_output = []
        test_set = fer2013('test', configs, tta=True, tta_size=8)

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                images, targets = test_set[idx]

                images = make_batch(images)
                images = images.cuda(non_blocking=True)

                outputs = model(images).cpu()
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]: sum the softmax scores over
                # the TTA crops, then take the most probable class.
                outputs = torch.sum(outputs, 0)
                outputs = torch.argmax(outputs, 0)
                outputs = outputs.item()
                targets = targets.item()
                total += 1
                correct += outputs == targets

                all_target.append(targets)
                all_output.append(outputs)

        all_target = np.array(all_target)
        all_output = np.array(all_output)
        matrix = confusion_matrix(all_target, all_output)
        np.set_printoptions(precision=2)
        plot_confusion_matrix(
            matrix,
            classes=class_names,
            normalize=True,
            title=title_name)
        # BUG FIX: the original formatted the undefined name
        # ``checkpoint_name``; the loop variable is ``checkpoint_path``.
        plt.savefig('./saved/cm/cm_{}.pdf'.format(checkpoint_path))
        plt.close()
Example #2
0
def main():
    """Run a ResMasking checkpoint over the FER2013 private test set with
    8-way TTA and save every misclassified image to ``./wrong_in_fer/``.

    Depends on module-level globals: ``checkpoint_name``, ``fer2013``,
    ``make_batch`` and ``class_names``.
    """
    with open("./configs/fer2013_config.json") as f:
        configs = json.load(f)

    acc = 0.0
    state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

    from models import resmasking_dropout1

    model = resmasking_dropout1

    model = model(in_channels=3, num_classes=7).cuda()
    model.load_state_dict(state["net"])
    model.eval()

    correct = 0
    total = 0
    all_target = []
    all_output = []

    # TTA set drives the prediction; the non-TTA set yields the raw image
    # that gets written to disk on a miss.
    test_set = fer2013("test", configs, tta=True, tta_size=8)
    hold_test_set = fer2013("test", configs, tta=False, tta_size=0)

    with torch.no_grad():
        for idx in tqdm(range(len(test_set)), total=len(test_set),
                        leave=False):
            tta_images, targets = test_set[idx]

            batch = make_batch(tta_images).cuda(non_blocking=True)

            scores = F.softmax(model(batch).cpu(), 1)

            # scores.shape == [tta_size, 7]; vote by summed probability.
            pred = torch.argmax(torch.sum(scores, 0), 0).item()
            label = targets.item()
            total += 1

            if pred == label:
                continue

            # Misclassified: recover the untouched image and write it out
            # named "<true>-><predicted>_<index>.png".
            image, target = hold_test_set[idx]
            image = (image.permute(1, 2, 0).numpy() * 255).astype(np.uint8)

            cv2.imwrite(
                "./wrong_in_fer/{}->{}_{}.png".format(
                    class_names[target], class_names[pred], idx),
                image,
            )
    def _calc_acc_on_private_test_with_tta(self):
        """Run TTA inference over ``self._test_set`` and save predictions.

        Writes the predicted class indices to
        ``<data_path>/<arch>.csv`` via pandas.

        NOTE(review): despite the name, all accuracy bookkeeping below is
        commented out, so this always returns 0.0, and the opened log file
        is created but never written to — confirm that is intentional.
        """
        self._model.eval()
        test_acc = 0.0
        preds = []
        print("Calc acc on private test with tta..")
        # Log file named after the architecture and model name from configs.
        f = open(
            "private_test_log_{}_{}.txt".format(self._configs["arch"],
                                                self._configs["model_name"]),
            "w",
        )

        with torch.no_grad():
            for idx in tqdm(range(len(self._test_set)),
                            total=len(self._test_set),
                            leave=False):
                # Each test item is a stack of TTA crops for one sample.
                images = self._test_set[idx]
                # targets = torch.LongTensor([targets])

                images = make_batch(images)
                images = images.cuda(non_blocking=True)
                # targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                outputs = F.softmax(outputs, 1)
                # Per-crop argmax: one prediction is appended per TTA crop,
                # not one per sample (no score summing across crops here).
                preds += torch.argmax(outputs, dim=1).cpu().numpy().tolist()

                # outputs.shape [tta_size, 7]
                # outputs = torch.sum(outputs, 0)

                # outputs = torch.unsqueeze(outputs, 0)
                # print(outputs.shape)
                # TODO: try with softmax first and see the change
                # acc = accuracy(outputs, targets)[0]
                # test_acc += acc.item()
                # f.writelines("{}_{}\n".format(idx, acc.item()))

            # test_acc = test_acc / (idx + 1)
        # print("Accuracy on private test with tta: {:.3f}".format(test_acc))
        f.close()
        import pandas as pd
        pd.DataFrame(preds).to_csv(self._configs['data_path'] + '/' +
                                   self._configs['arch'] + '.csv')
        print('test_output saved')
        return test_acc
Example #4
0
def main():
    """Cache per-model TTA score sums for the FER2013 private test set.

    For each (model-fn name, checkpoint file) pair in the module-level
    ``model_dict``, sums the softmaxed scores over the TTA crops for every
    test sample and stores the list as
    ``./saved/results/<checkpoint>.npy``.  Pairs whose result file already
    exists are skipped.
    """
    with open('./configs/fer2013_config.json') as f:
        configs = json.load(f)

    test_set = fer2013('test', configs, tta=True, tta_size=8)

    for model_name, checkpoint_path in model_dict:
        prediction_list = []  # each item is 7-ele array

        print("Processing", checkpoint_path)
        if os.path.exists('./saved/results/{}.npy'.format(checkpoint_path)):
            continue

        net = getattr(models, model_name)(in_channels=3, num_classes=7)

        state = torch.load(os.path.join('saved/checkpoints', checkpoint_path))
        net.load_state_dict(state['net'])

        net.cuda()
        net.eval()

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                sample_images, _targets = test_set[idx]
                batch = make_batch(sample_images).cuda(non_blocking=True)

                scores = F.softmax(net(batch).cpu(), 1)
                # scores.shape == [tta_size, 7] -> sum over the TTA axis.
                summed = torch.sum(scores, 0)

                prediction_list.append(
                    [round(score, 4) for score in summed.numpy()])

        np.save('./saved/results/{}.npy'.format(checkpoint_path),
                prediction_list)
    def _calc_acc_on_private_test_with_tta(self):
        """Compute TTA accuracy on the private test set.

        For every sample, the softmax scores of all TTA crops are summed
        before the accuracy check.  Per-sample accuracies are appended to
        ``private_test_log_<arch>_<model_name>.txt``.

        Returns:
            The per-sample accuracy averaged over the test set (in whatever
            units the project-level ``accuracy`` helper returns —
            presumably percent, given the log format; verify).
        """
        self._model.eval()
        test_acc = 0.0
        print("Calc acc on private test with tta..")
        f = open(
            "private_test_log_{}_{}.txt".format(
                self._configs["arch"], self._configs["model_name"]
            ),
            "w",
        )

        with torch.no_grad():
            for idx in tqdm(
                range(len(self._test_set)), total=len(self._test_set), leave=False
            ):
                # One sample = a stack of TTA crops plus its scalar label.
                images, targets = self._test_set[idx]
                targets = torch.LongTensor([targets])

                images = make_batch(images)
                images = images.cuda(non_blocking=True)
                targets = targets.cuda(non_blocking=True)

                outputs = self._model(images)
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]
                outputs = torch.sum(outputs, 0)

                # Restore a batch dim of 1 so ``accuracy`` sees [1, 7].
                outputs = torch.unsqueeze(outputs, 0)
                # print(outputs.shape)
                # TODO: try with softmax first and see the change
                acc = accuracy(outputs, targets)[0]
                test_acc += acc.item()
                f.writelines("{}_{}\n".format(idx, acc.item()))

            # Average over the number of processed samples.
            test_acc = test_acc / (idx + 1)
        print("Accuracy on private test with tta: {:.3f}".format(test_acc))
        f.close()
        return test_acc
def main(model_name, idx):
    """Occlusion-sensitivity probe on one JAFFE test image.

    Loads a trained model, crops the face from test image ``idx`` with a
    Haar cascade, then zeroes each cell of a 4x4 grid of 56x56 patches in
    turn, displaying every occluded image and printing the predicted
    emotion and its probability.

    Args:
        model_name: attribute name of the architecture in ``models``.
        idx: row index into ``saved/data/jaffe/test.csv``.
    """
    model = getattr(models, model_name)
    model = model(in_channels=3, num_classes=7)
    state = torch.load(
        '/home/aditya/Downloads/checkpoints2/cbam_resnet50__n_2020Jun24_13.32')
    model.load_state_dict(state['net'])
    model.cuda()
    model.eval()

    test_set = pd.read_csv('saved/data/jaffe/test.csv')
    path_list = test_set['filepath'].to_list()
    emotions = test_set['emotions'].to_list()
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
    ])
    image = cv2.imread('saved/data/jaffe/' + path_list[idx])
    face_cascade = cv2.CascadeClassifier(
        'saved/xml/haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Crop a square of side 2r centered on the detected face.
        r = max(w, h) / 2
        centerx = x + w / 2
        centery = y + h / 2
        nx = int(centerx - r)
        ny = int(centery - r)
        nr = int(r * 2)
        image = image[ny:ny + nr, nx:nx + nr]
    # Keep a single channel of the resized face.
    pix_val = cv2.resize(image, (224, 224))[:, :, 0]

    # Covering parts of image: each band is a contiguous 56-pixel range.
    size = [range(0, 56), range(56, 112), range(112, 168), range(168, 224)]
    n_bands = len(size)
    flag = 1
    ip = np.zeros((1, 224, 224, 1))
    for i in range(n_bands):
        for j in range(n_bands):
            test = np.copy(pix_val)
            # Vectorized zeroing of the contiguous patch (the original
            # looped over every pixel in Python) — same result, one slice.
            rows, cols = size[i], size[j]
            test[rows.start:rows.stop, cols.start:cols.stop] = 0
            # (The original also reshaped to (1, 224, 224, 1) and straight
            # back to (224, 224) — a no-op pair, removed.)
            # Replicate the single channel to 3 for the RGB model input.
            test = np.dstack([test] * 3)
            test_orig = test
            with torch.no_grad():
                batch = make_batch(transform(test))
                batch = batch.cuda(non_blocking=True)
                outputs = F.softmax(model(batch).cpu(), 1)
            plt.imshow(test_orig, interpolation='nearest')
            plt.show()
            print('Predicted class = ',
                  EMOTION_DICT[torch.argmax(outputs).item()],
                  ' with probability = ',
                  outputs[0][torch.argmax(outputs).item()].item())
            # Accumulate all occluded variants into one array.
            if flag == 1:
                ip = np.copy(test_orig)
                flag = 0
            else:
                ip = np.concatenate([ip, test_orig])
    torch.cuda.empty_cache()
def main():
    """Evaluate a ResMasking checkpoint on the FER2013 private test set
    with 8-way TTA and save a normalized confusion-matrix plot to
    ``./saved/cm/cm_<checkpoint_name>.pdf``.

    Depends on module-level globals: ``checkpoint_name``, ``fer2013``,
    ``make_batch``, ``class_names``, ``confusion_matrix`` and
    ``plot_confusion_matrix``.
    """
    with open("./configs/fer2013_config.json") as f:
        configs = json.load(f)

    acc = 0.0
    state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

    from models import resmasking_dropout1

    model = resmasking_dropout1

    model = model(in_channels=3, num_classes=7).cuda()
    model.load_state_dict(state["net"])
    model.eval()

    correct = 0
    total = 0
    all_target = []
    all_output = []

    test_set = fer2013("test", configs, tta=True, tta_size=8)

    with torch.no_grad():
        for idx in tqdm(range(len(test_set)), total=len(test_set), leave=False):
            tta_images, targets = test_set[idx]

            batch = make_batch(tta_images).cuda(non_blocking=True)

            scores = F.softmax(model(batch).cpu(), 1)

            # scores.shape == [tta_size, 7]; sum over crops, pick argmax.
            pred = torch.argmax(torch.sum(scores, 0), 0).item()
            label = targets.item()
            total += 1
            correct += pred == label

            all_target.append(label)
            all_output.append(pred)

    matrix = confusion_matrix(np.array(all_target), np.array(all_output))
    np.set_printoptions(precision=2)

    plot_confusion_matrix(
        matrix,
        classes=class_names,
        normalize=True,
        title="Residual Masking Network",
    )

    plt.savefig("./saved/cm/cm_{}.pdf".format(checkpoint_name))
    plt.close()
    print("save at ./saved/cm/cm_{}.pdf".format(checkpoint_name))
Example #8
0
def main():
    """Evaluate VGG19 checkpoints across k folds on the private test set.

    Reads ``./configs/<dataset_name>_config.json``; for every fold, loads
    the fold's checkpoint, runs TTA inference over the test set, then
    writes the confusion matrix, classification report and (for the
    "video" dataset) a per-image prediction table into
    ``./saved/results/``.

    Depends on module-level globals: ``dataset_name``, ``check_name``,
    ``test_name``, ``check_test_name``, ``best_checkpoint_selection``,
    ``cm_normalization``, ``class_names``, ``fer2013`` / ``video``,
    ``make_batch``, ``plot_confusion_matrix``, ``PrettyTable`` and ``Log``.
    """
    with open("./configs/" + dataset_name + "_config.json") as f:
        configs = json.load(f)
    data_path = configs["data_path"]

    for i in range(0, configs["k-fold"]):
        # With a single fold and best-checkpoint selection the checkpoint
        # name is used verbatim; otherwise it is templated with the fold no.
        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            checkpoint_name = check_name
        else:
            checkpoint_name = check_name.format(i + 1)

        check_log_name = checkpoint_name + "_" + test_name

        # Point the config at this fold's data directory.
        configs["data_path"] = data_path + "/fold_" + str(i + 1)

        if dataset_name == "video":
            data_vectors = pd.read_csv(
                os.path.join(configs["data_path"], "test.csv"))
            image_name_vector = data_vectors["image_name"].tolist()

        acc = 0.0
        state = torch.load("./saved/checkpoints/{}".format(checkpoint_name))

        from models import vgg19

        model = vgg19

        model = model(in_channels=3, num_classes=7).cuda()
        model.load_state_dict(state["net"])
        model.eval()

        correct = 0
        total = 0
        all_target = []
        all_output = []

        if dataset_name == "fer2013":
            test_set = fer2013("test", configs, tta=True, tta_size=10)
            # test_set = fer2013('test', configs, tta=False, tta_size=0)
        else:
            test_set = video("test", configs, tta=True, tta_size=10)

        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            # NOTE(review): this .format(i + 1) is a no-op — the string has
            # no placeholder.
            print("Testing on private test with tta..".format(i + 1))
        else:
            print("Testing fold {} on private test with tta..".format(i + 1))

        with torch.no_grad():
            for idx in tqdm(range(len(test_set)),
                            total=len(test_set),
                            leave=False):
                images, targets = test_set[idx]

                images = make_batch(images)
                images = images.cuda(non_blocking=True)

                outputs = model(images).cpu()
                outputs = F.softmax(outputs, 1)

                # outputs.shape [tta_size, 7]
                # Sum softmax scores over the TTA crops, then take argmax.
                outputs = torch.sum(outputs, 0)
                outputs = torch.argmax(outputs, 0)
                outputs = outputs.item()
                targets = targets.item()
                total += 1
                correct += outputs == targets

                all_target.append(targets)
                all_output.append(outputs)

                # if len(np.unique(all_target)) == 7:
                #     break
                # if idx == 10:
                #     break

        acc = 100. * correct / total
        #print("Accuracy on private test with tta: {:.3f}".format(acc))

        all_target = np.array(all_target)
        all_output = np.array(all_output)

        matrix = confusion_matrix(all_target, all_output)
        np.set_printoptions(precision=2)

        # A single validated checkpoint logs to "...1.txt"; otherwise all
        # folds share one "all_results" log file.
        if best_checkpoint_selection == 1 and configs["k-fold"] == 1:
            validated = "1"
        else:
            validated = "all_results"

        log_name = check_test_name.format(validated)

        if not best_checkpoint_selection == 1 and configs[
                "k-fold"] != 1 and cm_normalization == True:
            Log("Test fold {}\n\n".format(i + 1),
                "./saved/results/{}.txt".format(log_name))

        # plt.figure(figsize=(5, 5))
        plot_confusion_matrix(
            matrix,
            classes=class_names,
            normalize=cm_normalization,
            # title='{} \n Accuracc: {:.03f}'.format(checkpoint_name, acc)
            title="Vgg19",
            log_name=log_name,
            check_log_name=check_log_name)

        class_report = classification_report(all_target,
                                             all_output,
                                             target_names=class_names)
        print("Classification report")
        print(class_report)
        Log("\n\nClassification report\n",
            "./saved/results/{}.txt".format(log_name))
        Log(class_report, "./saved/results/{}.txt".format(log_name))
        Log("\n\n", "./saved/results/{}.txt".format(log_name))

        # For the video dataset with best-checkpoint selection, also log a
        # per-image table of true vs. predicted labels.
        if dataset_name == "video" and best_checkpoint_selection == 1:
            t_image_name_vector = ["Image names"]
            t_all_target = ["true"]
            t_all_output = ["predicted"]
            for j in range(0, len(image_name_vector)):
                t_image_name_vector.append(image_name_vector[j])
                t_all_target.append(str(all_target[j]))
                t_all_output.append(str(all_output[j]))
            t = PrettyTable(t_image_name_vector)
            t.add_row(t_all_target)
            t.add_row(t_all_output)
            print("Image predictions")
            print(t)
            Log("Image predictions\n",
                "./saved/results/{}.txt".format(log_name))
            Log(t, "./saved/results/{}.txt".format(log_name))
            Log("\n\n", "./saved/results/{}.txt".format(log_name))