Example #1
# Imports this example relies on (params and utils are project-local modules):
import numpy as np
import torch
import params
import utils
from sklearn.manifold import TSNE
from torch.autograd import Variable


def visualizePerformance(feature_extractor, class_classifier,
                         domain_classifier, src_test_dataloader,
                         tgt_test_dataloader):
    """
    Evaluate the performance of dann and source only by visualization.

    :param feature_extractor: network used to extract feature from target samples
    :param class_classifier: network used to predict labels
    :param domain_classifier: network used to predict domain
    :param source_dataloader: test dataloader of source domain
    :param target_dataloader: test dataloader of target domain
    :return:
    """
    # Setup the network
    feature_extractor.eval()
    class_classifier.eval()
    domain_classifier.eval()

    # Grab one batch of samples from each domain (random if the loaders shuffle).
    dataiter = iter(src_test_dataloader)
    s_images, s_labels = next(dataiter)
    if params.use_gpu:
        s_images = Variable(s_images.cuda())
    else:
        s_images = Variable(s_images)

    s_tags = Variable(torch.zeros(s_labels.size(0)).type(torch.LongTensor))  # domain tag 0 = source

    dataiter = iter(tgt_test_dataloader)
    t_images, t_labels = next(dataiter)
    if params.use_gpu:
        t_images = Variable(t_images.cuda())
    else:
        # Keep t_labels a plain CPU tensor (matching the source branch) so the
        # np.concatenate call below works on it directly.
        t_images = Variable(t_images)

    t_tags = Variable(torch.ones(t_labels.size(0)).type(torch.LongTensor))  # domain tag 1 = target

    # Compute the feature embeddings of the source and target batches.
    embedding1 = feature_extractor(s_images)
    embedding2 = feature_extractor(t_images)

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3000)

    if params.use_gpu:
        dann_tsne = tsne.fit_transform(
            np.concatenate((embedding1.cpu().detach().numpy(),
                            embedding2.cpu().detach().numpy())))
    else:
        dann_tsne = tsne.fit_transform(
            np.concatenate(
                (embedding1.detach().numpy(), embedding2.detach().numpy())))

    # utils.plot_embedding(source_only_tsne, combined_test_labels.argmax(1), combined_test_domain.argmax(1), 'Source only')
    utils.plot_embedding(dann_tsne, np.concatenate((s_labels, t_labels)),
                         np.concatenate((s_tags, t_tags)), 'Domain Adaptation')
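
Both examples delegate the drawing to utils.plot_embedding, which is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it receives the 2-D t-SNE coordinates, the class labels, the domain tags, a title, and (in Example #2) an optional file name; the matplotlib styling is an illustrative choice, not the project's actual code:

# Hypothetical sketch of the project's plot_embedding helper (utils.py is
# not shown on this page; signature inferred from the call sites above).
import matplotlib.pyplot as plt

def plot_embedding(X, y, d, title, imgName=None):
    # Normalize the 2-D coordinates to [0, 1] so the plot range is stable.
    x_min, x_max = X.min(0), X.max(0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure(figsize=(8, 8))
    # Color each point by class label; distinguish domains by marker shape
    # (domain tag 0 = source, 1 = target, as set in the callers above).
    for marker, domain in (('o', 0), ('x', 1)):
        idx = (d == domain)
        plt.scatter(X[idx, 0], X[idx, 1], c=y[idx], cmap='tab10',
                    marker=marker, s=15, alpha=0.6)
    plt.title(title)
    if imgName is not None:
        plt.savefig(imgName)
    plt.show()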
Example #2
File: main.py  Project: yhlhit/dann
def visualizePerformance(feature_extractor,
                         class_classifier,
                         domain_classifier,
                         src_test_dataloader,
                         tgt_test_dataloader,
                         num_of_samples=None,
                         imgName=None):
    """
    Evaluate the performance of dann and source only by visualization.

    :param feature_extractor: network used to extract feature from target samples
    :param class_classifier: network used to predict labels
    :param domain_classifier: network used to predict domain
    :param source_dataloader: test dataloader of source domain
    :param target_dataloader: test dataloader of target domain
    :param num_of_samples: the number of samples (from train and test respectively) for t-sne
    :param imgName: the name of saving image

    :return:
    """

    # Setup the network
    feature_extractor.eval()
    class_classifier.eval()
    domain_classifier.eval()

    # Decide how many samples to take from each domain.
    if num_of_samples is None:
        num_of_samples = params.batch_size
    else:
        # len(dataloader) counts batches, so this bound is only approximate
        # (the last batch may be smaller than params.batch_size).
        assert num_of_samples <= len(src_test_dataloader) * params.batch_size, \
            'The number of samples cannot be bigger than the dataset.'

    # Collect source data.
    s_images, s_labels, s_tags = [], [], []
    for batch in src_test_dataloader:
        images, labels = batch

        if params.use_gpu:
            s_images.append(images.cuda())
        else:
            s_images.append(images)
        s_labels.append(labels)

        s_tags.append(torch.zeros(labels.size(0)).type(torch.LongTensor))  # domain tag 0 = source

        # Stop once enough batches have been collected.
        if len(s_images) * params.batch_size > num_of_samples:
            break

    s_images, s_labels, s_tags = torch.cat(s_images)[:num_of_samples], \
                                 torch.cat(s_labels)[:num_of_samples], torch.cat(s_tags)[:num_of_samples]

    # Collect target data.
    t_images, t_labels, t_tags = [], [], []
    for batch in tgt_test_dataloader:
        images, labels = batch

        if params.use_gpu:
            t_images.append(images.cuda())
        else:
            t_images.append(images)
        t_labels.append(labels)

        t_tags.append(torch.ones(labels.size(0)).type(torch.LongTensor))  # domain tag 1 = target

        # Stop once enough batches have been collected.
        if len(t_images) * params.batch_size > num_of_samples:
            break

    t_images, t_labels, t_tags = torch.cat(t_images)[:num_of_samples], \
                                 torch.cat(t_labels)[:num_of_samples], torch.cat(t_tags)[:num_of_samples]

    # Compute the feature embeddings of the source and target samples.
    embedding1 = feature_extractor(s_images)
    embedding2 = feature_extractor(t_images)

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3000)

    if params.use_gpu:
        dann_tsne = tsne.fit_transform(
            np.concatenate((embedding1.cpu().detach().numpy(),
                            embedding2.cpu().detach().numpy())))
    else:
        dann_tsne = tsne.fit_transform(
            np.concatenate(
                (embedding1.detach().numpy(), embedding2.detach().numpy())))

    utils.plot_embedding(dann_tsne, np.concatenate((s_labels, t_labels)),
                         np.concatenate((s_tags, t_tags)), 'Domain Adaptation',
                         imgName)
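
For reference, a hedged sketch of how this variant might be invoked once the networks and test loaders have been built elsewhere in the project; the sample count and output file name below are illustrative assumptions, not values from the project:

# Hypothetical call site (names and values are assumptions, not project code):
visualizePerformance(feature_extractor, class_classifier, domain_classifier,
                     src_test_dataloader, tgt_test_dataloader,
                     num_of_samples=512, imgName='dann_tsne.png')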