Example No. 1
    def plot_embedding(self,
                       resultpath=None,
                       algos=None,
                       show_label=False,
                       disp_num_r_n_e=20):
        """Function to plot the embedding.

            Args:
                resultpath (str): Path where the result will be saved.
                algos (str): Name of the algorithm that generated the embedding.
                show_label (bool): If True, labels are displayed on the plot.
                disp_num_r_n_e (int): Number of head, tail, and relation embeddings to display.

        """
        assert self.model is not None, 'Please provide a model!'

        if self.ent_only_plot:
            x = torch.cat(self.h_emb + self.t_emb, dim=0)
            ent_names = np.concatenate((self.h_name, self.t_name), axis=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())
            x = np.asarray(x)
            ent_names = np.asarray(ent_names)

            self.draw_embedding(x, ent_names, resultpath,
                                algos + '_entity_plot', show_label)

        if self.rel_only_plot:
            x = torch.cat(self.r_emb, dim=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())
            self.draw_embedding(x, self.r_name, resultpath,
                                algos + '_rel_plot', show_label)

        if self.ent_and_rel_plot:
            length = len(self.h_proj_emb)
            x = torch.cat(self.h_proj_emb + self.r_proj_emb + self.t_proj_emb,
                          dim=0)
            self._logger.info("\t Reducing dimension using TSNE to 2!")
            x = TSNE(n_components=2).fit_transform(x.detach().cpu())

            h_embs = x[:length, :]
            r_embs = x[length:2 * length, :]
            t_embs = x[2 * length:3 * length, :]

            self.draw_embedding_rel_space(
                h_embs[:disp_num_r_n_e], r_embs[:disp_num_r_n_e],
                t_embs[:disp_num_r_n_e], self.h_name[:disp_num_r_n_e],
                self.r_name[:disp_num_r_n_e], self.t_name[:disp_num_r_n_e],
                resultpath, algos + '_ent_n_rel_plot', show_label)
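
A minimal usage sketch (not from the source): `viz` stands in for whatever object owns plot_embedding, with its embedding buffers (h_emb, t_emb, the name arrays) and the *_plot flags already populated during evaluation; the algorithm name is assumed.

viz.ent_only_plot = True                 # plot entity embeddings only
viz.plot_embedding(resultpath='./figures',
                   algos='TransE',       # assumed name; used as the plot-name prefix
                   show_label=True,
                   disp_num_r_n_e=20)
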
Example No. 2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE


def plot_points(embedding, colors=None):
    # Plot the embedding in 2-D, colored by the classes in data.y.
    # Assumes data, dataset, and reset_random exist at module level.
    reset_random()  # only needed for TSNE
    if colors is None:
        colors = [
            '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535',
            '#ffd700'
        ]
    print(np.shape(embedding))
    if np.shape(embedding)[1] > 2:
        embedding = TSNE(n_components=2).fit_transform(
            embedding.detach().cpu().numpy())
    y = data.y.cpu().numpy()

    plt.figure(figsize=(8, 8))
    for i in range(dataset.num_classes):
        plt.scatter(embedding[y == i, 0],
                    embedding[y == i, 1],
                    s=20,
                    color=colors[i])
    plt.axis('off')
    plt.show()
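
A usage sketch under the assumption of a PyTorch Geometric-style setup; `model`, `data`, and `dataset` are the module-level globals the function reads, and the forward signature is assumed:

import torch

model.eval()
with torch.no_grad():
    out = model(data.x, data.edge_index)  # assumed forward signature
plot_points(out)  # reduced to 2-D via t-SNE inside the function if needed
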
Example No. 3
# Excerpt from a domain-adversarial (DaNN-style) training loop; assumes torch,
# TSNE, feature_extractor, source_dataloader / target_dataloader, and the
# result lists feature_extract / answer are defined earlier in the script.
for i, ((source_data, source_label),
        (target_data, _)) in enumerate(
            zip(source_dataloader, target_dataloader)):

    source_data = source_data.cuda()
    source_label = source_label.cuda()
    target_data = target_data.cuda()

    # Mix the source and target data together; otherwise batch_norm may
    # compute the wrong statistics (the two domains have different mean/var).
    mixed_data = torch.cat([source_data, target_data], dim=0)
    domain_label = torch.zeros(
        [source_data.shape[0] + target_data.shape[0], 1]).cuda()
    # Set the source-domain labels to 1 (target stays 0), and keep them as a
    # plain array so they can color the t-SNE feature plot later; running
    # t-SNE on the labels themselves would be meaningless.
    domain_label[:source_data.shape[0]] = 1
    domain_label = domain_label.cpu().numpy()
    # Step 1: train the domain classifier
    feature = feature_extractor(mixed_data)
    # domain_logits = domain_classifier(feature.detach())
    feature = TSNE(n_components=2).fit_transform(
        feature.detach().cpu().numpy())
    feature_extract.append(feature)
    answer.append(domain_label)
# class_logits = label_predictor(feature[:source_data.shape[0]])
# domain_logits = domain_classifier(feature)
# loss = domain_criterion(domain_logits, domain_label)
# running_D_loss+= loss.item()
# loss.backward()
# optimizer_D.step()
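
Note that the loop runs t-SNE separately on each batch, so coordinates are not comparable across batches; the more common pattern is to collect the raw features and reduce once at the end. A minimal sketch of how the collected results might be plotted afterwards, assuming the domain labels were kept as raw 0/1 values as above:

import numpy as np
import matplotlib.pyplot as plt

X = np.concatenate(feature_extract, axis=0)  # stacked (N, 2) t-SNE points
d = np.concatenate(answer, axis=0).ravel()   # domain labels: 1=source, 0=target
plt.scatter(X[d == 1, 0], X[d == 1, 1], s=5, label='source')
plt.scatter(X[d == 0, 0], X[d == 0, 1], s=5, label='target')
plt.legend()
plt.show()
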
Example No. 4
import numpy as np
import torch
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from DataManager import DataManager
from util import TRAIN, SMALL
from sklearn.neighbors import NearestNeighbors

model = torch.load('.//data//budala_16.pickle',
                   map_location=torch.device('cpu'))

#data_manager_train = DataManager()

#tag_to_idx, idx_to_tag = data_manager_train.get_tag_dicts()

embedding = model.module.tag_embeddings.weight

embedding = TSNE(n_components=2).fit_transform(
    embedding.detach().cpu().numpy())  # keep float features for t-SNE
# rows of [x, y, index] so each 2-D point can be traced back to its tag
embedding_with = np.array([[x, y, i] for i, (x, y) in enumerate(embedding)],
                          dtype=np.float32)
plt.scatter(embedding[:, 0], embedding[:, 1])

# nearest neighbors
# nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(embedding)
# distances, indices = nbrs.kneighbors(embedding)
# print(indices)

# embeddings in 2D
x = embedding[:, 0]
y = embedding[:, 1]
names = np.array(list("ABCDEFGHIJKLMNO"))

c = np.random.randint(1, 5, size=15)
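
The snippet ends with names and colors defined but never used; a minimal sketch of the annotated scatter it appears to be building (layout assumed):

n = len(names)  # names/colors cover only the first 15 embedded tags
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(x[:n], y[:n], c=c, s=40)
for xi, yi, name in zip(x[:n], y[:n], names):
    ax.annotate(name, (xi, yi), textcoords='offset points', xytext=(4, 4))
plt.show()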