Example no. 1
def eval_text_decoding(model, discriminator):
    model.eval()
    rev_classes = {v: k for k, v in train_set.class_to_idx.items()}

    train_fruit_names = [rev_classes[x] for x in rev_classes
                         if x not in TEST_FRUITS]

    # Fixed, hand-picked subset of training fruits for a reproducible grid.
    train_fruit_names = [
        'Tamarillo', 'Apple Golden 2', 'Pepper Green', 'Quince',
        'Pepper Yellow', 'Apple Granny Smith', 'Cherry 2', 'Apple Braeburn',
        'Mangostan'
    ]
    images = []
    for name in train_fruit_names:
        print('Generating instance of {}'.format(name))
        print('Description: {}'.format(fruit_name_to_desc[name].strip()))
        instn = generate_instance(name).to(DEVICE)
        output_img, _, _ = model(instn)  # mu and logvar are unused here
        img = classifier.imshow(output_img[0].cpu().detach())
        images.append((img, name))
        classification = discriminator(output_img)
        # Rank of the true label among the discriminator's 90 class scores.
        top_k = classification.topk(90)[1][0]
        rank = 90
        for idx in range(len(top_k)):
            if top_k[idx].item() == train_set.class_to_idx[name]:
                rank = idx + 1
                break
        print('Rank of true label: {}'.format(rank))
        _, predicted = torch.max(classification, 1)
        predicted = rev_classes[predicted[0].item()]
        print('Classifier predicted {}\n\n'.format(predicted))
    display_images(images,
                   ROOT + '/Results/IndividualSentences/train_fruits.png')
    test_fruit_names = [rev_classes[x] for x in rev_classes
                        if x in TEST_FRUITS]

    images = []
    for name in test_fruit_names:
        print('Generating instance of {}'.format(name))
        print('Description: {}'.format(fruit_name_to_desc[name].strip()))
        instn = generate_instance(name).to(DEVICE)
        output_img, _, _ = model(instn)  # mu and logvar are unused here
        img = classifier.imshow(output_img[0].cpu().detach())
        images.append((img, name))
        classification = discriminator(output_img)
        # Rank of the true label among the discriminator's 90 class scores.
        top_k = classification.topk(90)[1][0]
        rank = 90
        for idx in range(len(top_k)):
            if top_k[idx].item() == train_set.class_to_idx[name]:
                rank = idx + 1
                break
        print('Rank of true label: {}'.format(rank))
        _, predicted = torch.max(classification, 1)
        predicted = rev_classes[predicted[0].item()]
        print('Classifier predicted {}\n\n'.format(predicted))
    display_images(images,
                   ROOT + '/Results/IndividualSentences/test_fruits.png')
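A helper sketch that would factor out the rank computation repeated in both loops above (and again in Example no. 2). The name rank_of_true_label is hypothetical; it assumes classification is a (1, num_classes) score tensor as produced by the discriminator:

def rank_of_true_label(classification, true_idx, num_classes=90):
    # Class indices ordered from highest to lowest score.
    top_k = classification.topk(num_classes)[1][0]
    for idx in range(len(top_k)):
        if top_k[idx].item() == true_idx:
            return idx + 1  # 1-based rank of the true label
    return num_classes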
Example no. 2
def eval_text_decoding(model, discriminator, enc):
    model.eval()
    rev_classes = {v: k for k, v in train_set.class_to_idx.items()}

    test_fruit_names = [rev_classes[x] for x in rev_classes
                        if x in TEST_FRUITS]
    images = []
    for name in test_fruit_names:
        print('Generating instance of {}'.format(name))
        print('Description: {}'.format(fruit_name_to_desc[name].strip()))
        instn = generate_instance(name).to(DEVICE)
        output_img, _, _ = model(instn)
        encoding = enc(instn)  # pretrained encoder output, unused below
        img = classifier.imshow(output_img[0].cpu().detach())
        images.append((img, name))

        classification = discriminator(output_img)
        # Rank of the true label among the discriminator's 90 class scores.
        top_k = classification.topk(90)[1][0]
        rank = 90
        for idx in range(len(top_k)):
            if top_k[idx].item() == train_set.class_to_idx[name]:
                rank = idx + 1
                break
        print('Rank of true label: {}'.format(rank))
        _, predicted = torch.max(classification, 1)
        predicted = rev_classes[predicted[0].item()]
        print('Classifier predicted {}\n\n'.format(predicted))
    display_images(images, ROOT + '/Results/Pretraining/test_fruits.png')
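display_images itself is not part of these snippets. A plausible sketch, assuming each element of images is an (image_array, title) pair; only the (images, path, cols) signature is taken from the calls above, the rest is an assumption:

import math
import matplotlib.pyplot as plt

def display_images(images, path, cols=3):
    # Lay the (image, title) pairs out on a grid and save the figure.
    rows = math.ceil(len(images) / cols)
    fig, _ = plt.subplots(rows, cols, figsize=(4 * cols, 4 * rows))
    for ax, (img, title) in zip(fig.axes, images):
        ax.imshow(img)
        ax.set_title(title)
    for ax in fig.axes:
        ax.axis('off')  # hide ticks, including on any unused cells
    fig.savefig(path)
    plt.show()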
Example no. 3
def evaluate_novel_descriptions(descriptions, model):
    text_cnn = TextCNN()
    text_cnn.load_state_dict(torch.load(model))
    text_cnn.to(DEVICE)
    text_cnn.eval()

    DISCRIMINATOR_PATH = ROOT + '/machine_learning/discriminator/result/fruits_net_30_epochs.pth'

    discriminator = classifier.Net()
    discriminator.load_state_dict(torch.load(DISCRIMINATOR_PATH))
    discriminator.to(DEVICE)
    discriminator.eval()

    rev_classes = {v: k for k, v in train_set.class_to_idx.items()}

    images = []
    embeds = generate_text_instances(descriptions)
    embeds = embeds.to(DEVICE)
    imgs, _, _ = text_cnn(embeds)
    classifications = discriminator(imgs)
    for idx, output_img in enumerate(imgs):
        img = classifier.imshow(output_img.cpu().detach())
        images.append((img, descriptions[idx]))
        top_k = classifications[idx].topk(90)[1]
        print('Description {}'.format(descriptions[idx]))
        # Distinct loop variable so the enumerate index above is not shadowed.
        for k in range(3):
            print('Prediction number {}: {}'.format(
                k + 1, rev_classes[top_k[k].item()]))
        print('\n')
    display_images(images,
                   ROOT + '/Results/WithRandomness/novel_descriptions.png',
                   cols=1)
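generate_text_instances is likewise external to these snippets. A hypothetical sketch of what it might do, assuming word_vectors is a pretrained token-to-vector mapping (e.g. GloVe) and that TextCNN consumes a (batch, max_len, embed_dim) float tensor; max_len and embed_dim are illustrative defaults:

import numpy as np
import torch

def generate_text_instances(descriptions, max_len=60, embed_dim=300):
    batch = []
    for desc in descriptions:
        tokens = desc.lower().split()[:max_len]
        vecs = [word_vectors[t] for t in tokens if t in word_vectors]
        # Known tokens fill the front; the remainder stays zero-padded.
        embeds = torch.zeros(max_len, embed_dim)
        if vecs:
            embeds[:len(vecs)] = torch.tensor(np.stack(vecs),
                                              dtype=torch.float)
        batch.append(embeds)
    return torch.stack(batch)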
Example no. 4
def interpolate(model, descriptions, path):
    text_cnn = TextCNN()
    text_cnn.load_state_dict(torch.load(model))
    text_cnn.to(DEVICE)
    text_cnn.eval()

    descs = generate_text_instances(descriptions).to(DEVICE)

    mus, logvars = text_cnn.encode(descs)

    mu0 = mus[0].view(1, 256)
    mu1 = mus[1].view(1, 256)

    logvar0 = logvars[0].view(1, 256)
    logvar1 = logvars[1].view(1, 256)

    images = []
    # Six interpolation steps between the two latent Gaussians, blending
    # the means and log-variances in lockstep.
    for i in range(6):
        mu = i / 5 * mu0 + (1 - i / 5) * mu1
        logvar = i / 5 * logvar0 + (1 - i / 5) * logvar1
        img = text_cnn.decode(text_cnn.reparameterize(mu, logvar))[0]
        img = classifier.imshow(img.cpu().detach())
        images.append(
            (img, '{}% {}, {}% {}'.format(int(100 * i / 5), descriptions[0],
                                          int(100 * (1 - i / 5)),
                                          descriptions[1])))
    display_images(images, path, cols=1)
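Linear interpolation can cut through low-density regions of a Gaussian latent space; spherical interpolation (slerp) is a common alternative for VAE latents. A sketch under the same (1, 256) shapes as above; this is a variation, not part of the original code. With it, the blend above becomes mu = slerp(mu1, mu0, i / 5):

import torch

def slerp(v0, v1, t):
    # Follow the great circle between v0 and v1 instead of the chord.
    v0_n = v0 / v0.norm()
    v1_n = v1 / v1.norm()
    omega = torch.acos((v0_n * v1_n).sum().clamp(-1.0, 1.0))
    if omega.abs() < 1e-6:
        return (1 - t) * v0 + t * v1  # nearly parallel: fall back to lerp
    return (torch.sin((1 - t) * omega) * v0 +
            torch.sin(t * omega) * v1) / torch.sin(omega)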
Example no. 5
def evaluate_repl(model):
    # Silence warnings (e.g., from matplotlib) during the interactive loop.
    import warnings
    warnings.filterwarnings("ignore")
    text_cnn = TextCNN()
    text_cnn.load_state_dict(torch.load(model))
    text_cnn.to(DEVICE)
    text_cnn.eval()

    DISCRIMINATOR_PATH = ROOT + '/machine_learning/discriminator/result/fruits_net_30_epochs.pth'

    discriminator = classifier.Net()
    discriminator.load_state_dict(torch.load(DISCRIMINATOR_PATH))
    discriminator.to(DEVICE)
    discriminator.eval()

    rev_classes = {v: k for k, v in train_set.class_to_idx.items()}

    counter = 0
    while True:
        desc = input(
            'Type in a description of a fruit and press enter. Press q to quit.'
        )
        if desc == 'q':
            break
        else:
            embeds = generate_text_instances([desc])
            embeds = embeds.to(DEVICE)
            imgs, _, _ = text_cnn(embeds)
            classifications = discriminator(imgs)
            for idx, output_img in enumerate(imgs):
                img = classifier.imshow(output_img.cpu().detach())
                top = classifications[idx].argmax().item()  # top-1 class index
                print('The classifier thought your fruit looked like a {}'.
                      format(rev_classes[top]))
                f, axarr = plt.subplots(1, 1)
                axarr.axis('off')
                axarr.imshow(img)
                axarr.set_title(desc)
                plt.savefig(
                    ROOT +
                    '/Results/FuglyFruits/fugly_fruit_{}.png'.format(counter))
                plt.show()
                counter += 1
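A minimal way to invoke the REPL; the checkpoint path below is illustrative, not taken from the original project:

if __name__ == '__main__':
    # Hypothetical checkpoint location; point this at the trained TextCNN.
    evaluate_repl(ROOT + '/machine_learning/text_cnn/result/text_cnn.pth')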