Code Example #1
def vis_embed(src_model, tgt_model, src_loader, tgt_loader_eval):
    """Scatter source and target embeddings together, colored by domain."""
    # embed each domain with its own encoder
    X, _ = losses.extract_embeddings(src_model, src_loader)
    Y, _ = losses.extract_embeddings(tgt_model, tgt_loader_eval)

    # label source points 1 and target points 2 so the plot colors by domain
    Y_labels = np.ones(Y.shape[0]) * 2
    X_labels = np.ones(X.shape[0])
    scatter(np.vstack([Y, X]), np.hstack([Y_labels, X_labels]))
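Every snippet in this section leans on losses.extract_embeddings from the M-ADDA project, whose body is not shown here. Judging from the call sites (the results are numpy arrays fed to scikit-learn and indexed with .shape), a minimal sketch could look like the following; the helper's name and call signature come from the examples, the body is an assumption:

import numpy as np
import torch

def extract_embeddings(model, loader):
    # Hypothetical sketch: run the encoder over a data loader and collect
    # (embeddings, labels) as numpy arrays, which is what the callers expect.
    model.eval()
    embeddings, labels = [], []
    with torch.no_grad():
        for images, y in loader:
            z = model(images.cuda())  # assumes forward() returns the embedding
            embeddings.append(z.cpu().numpy())
            labels.append(y.numpy())
    return np.vstack(embeddings), np.concatenate(labels).ravel()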
Code Example #2
File: vis.py  Project: deep0learning/M-ADDA
def scatter_target(src_model, tgt_model, src_loader,
                   tgt_loader_eval, fname="p3"):
    """Overlay the source and target embeddings in one scatter plot."""
    X, _ = losses.extract_embeddings(src_model, src_loader)
    Y, _ = losses.extract_embeddings(tgt_model, tgt_loader_eval)
    import matplotlib.pyplot as plt
    # set the grid style before the axes are created, otherwise rc has no effect
    plt.rc('grid', linestyle="--", color='grey')
    plt.figure(figsize=(10, 10))
    plt.scatter(X[:, 0], X[:, 1], s=8.0, color="C0")  # source
    plt.scatter(Y[:, 0], Y[:, 1], s=8.0, color="C1")  # target
    plt.grid(True)
    # hard-coded output directory from the original project
    plt.savefig("/mnt/home/issam/Summaries/{}.pdf".format(fname))
    plt.close()
Code Example #3
def validate(src_model, tgt_model, src_data_loader, tgt_data_loader):
    """Evaluate the target encoder: fit a k-NN classifier on source
    embeddings, then score it on target embeddings."""
    # set eval state for Dropout and BN layers
    src_model.eval()
    tgt_model.eval()
    with torch.no_grad():
        X, y = losses.extract_embeddings(src_model, src_data_loader)
        Xtest, ytest = losses.extract_embeddings(tgt_model, tgt_data_loader)

        clf = neighbors.KNeighborsClassifier(n_neighbors=2)
        clf.fit(X, y)
        y_pred = clf.predict(Xtest)

        acc = (y_pred == ytest).mean()

    return acc
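The 2-NN classifier fit on the source embeddings stands in for a source classifier head: a high score means the adapted target embeddings land next to same-class source embeddings. A hedged usage sketch, reusing models and loaders built as in Code Example #6:

acc = validate(src_model, tgt_model, src_loader, tgt_val_loader)
print("target 2-NN accuracy: {:.3f}".format(acc))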
Code Example #4
def scatter_source(src_model,
                   tgt_model,
                   src_loader,
                   tgt_loader_eval,
                   fname="p1"):
    """Scatter the source embeddings only; tgt_model and tgt_loader_eval
    are unused but keep the signature aligned with scatter_target."""
    X, _ = losses.extract_embeddings(src_model, src_loader)
    import matplotlib.pyplot as plt
    # set the grid style before the axes are created, otherwise rc has no effect
    plt.rc('grid', linestyle="--", color='grey')
    plt.figure(figsize=(10, 10))
    plt.scatter(X[:, 0], X[:, 1], s=8.0, color="C0")
    plt.grid(True)
    plt.savefig("{}.pdf".format(fname))
    plt.close()
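Both scatter helpers assume trained encoders are already loaded; a hedged calling sketch (model and loader construction mirrors Code Example #6, and the fname values are just the defaults):

# src_model, tgt_model, src_loader, tgt_val_loader built as in Code Example #6
scatter_source(src_model, tgt_model, src_loader, tgt_val_loader, fname="p1")
scatter_target(src_model, tgt_model, src_loader, tgt_val_loader, fname="p3")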
Code Example #5
File: train.py  Project: hlemoine3000/M-ADDA
def fit_center(src_model,
               tgt_model,
               src_loader,
               tgt_loader,
               opt_tgt,
               epochs=30):
    """Train classifier for source domain."""
    ####################
    # 1. setup network #
    ####################
    n_classes = tgt_model.n_classes
    # set train state for Dropout and BN layers
    src_model.train()
    tgt_model.train()

    # cluster the source embeddings once; the centers act as fixed anchors
    src_embeddings, _ = losses.extract_embeddings(src_model, src_loader)

    src_kmeans = KMeans(n_clusters=n_classes)
    src_kmeans.fit(src_embeddings)

    src_centers = torch.FloatTensor(src_kmeans.cluster_centers_).cuda()

    ####################
    # 2. train network #
    ####################

    for epoch in range(epochs):
        for step, (images, labels) in enumerate(tgt_loader):
            # move the batch to the GPU
            images = images.cuda()
            labels = labels.squeeze_().cuda()

            # zero gradients for opt
            opt_tgt.zero_grad()

            # center loss: distance from target embeddings to the source centers
            loss = losses.center_loss(tgt_model, {
                "X": images,
                "y": labels
            }, src_model, src_centers, None, src_kmeans, None)
            # update the target encoder
            loss.backward()
            opt_tgt.step()
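losses.center_loss is called but never defined in these examples, and its seven-argument signature (three arguments are None here) hints at a more general interface. The idea it plausibly implements is to pull each target embedding toward its nearest source cluster center; a minimal sketch of that idea, under the loud assumption that the project's real implementation differs in detail:

import torch

def center_loss_sketch(tgt_model, batch, src_centers):
    # Hypothetical: embed the target batch and penalize the squared
    # distance to the nearest source center (hard assignment; the real
    # loss may use a soft assignment or the labels in batch["y"]).
    z = tgt_model(batch["X"])        # (B, D) target embeddings
    d = torch.cdist(z, src_centers)  # (B, K) distances to the K centers
    nearest = d.min(dim=1).values    # distance to the closest center
    return (nearest ** 2).mean()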
Code Example #6
def visEmbed(exp_dict):
    src_loader = datasets.get_loader(exp_dict["src_dataset"],
                                     "train",
                                     batch_size=exp_dict["src_batch_size"])

    tgt_val_loader = datasets.get_loader(exp_dict["tgt_dataset"],
                                         "val",
                                         batch_size=exp_dict["tgt_batch_size"])

    src_model, src_opt = models.get_model(exp_dict["src_model"],
                                          exp_dict["n_outputs"])
    src_model.load_state_dict(torch.load(exp_dict["path"] + "/model_src.pth"))

    tgt_model, tgt_opt = models.get_model(exp_dict["tgt_model"],
                                          exp_dict["n_outputs"])
    tgt_model.load_state_dict(torch.load(exp_dict["path"] + "/model_tgt.pth"))

    # extract_embeddings returns (embeddings, labels); keep 500 points per
    # domain so t-SNE stays fast
    X, X_tgt = losses.extract_embeddings(src_model, src_loader)
    Y, Y_tgt = losses.extract_embeddings(tgt_model, tgt_val_loader)

    X, X_tgt = X[:500], X_tgt[:500]
    Y, Y_tgt = Y[:500], Y_tgt[:500]

    src_kmeans = KMeans(n_clusters=10)
    src_kmeans.fit(X)
    Xc = src_kmeans.cluster_centers_

    # label each k-means center with the class of its nearest source embeddings
    clf = neighbors.KNeighborsClassifier(n_neighbors=2)
    clf.fit(X, X_tgt)
    Xc_tgt = clf.predict(Xc)

    # t-SNE has no out-of-sample transform, so embed the target points,
    # source points, and cluster centers jointly in one fit_transform call
    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
    S_tsne = tsne.fit_transform(np.vstack([Y, X, Xc]))
    Y_tsne = S_tsne[:Y.shape[0]]
    X_tsne = S_tsne[Y.shape[0]:-10]  # the last 10 rows are the cluster centers
    Xc_tsne = S_tsne[-10:]

    colors = [
        "b", "g", "r", "c", "m", "y", "gray", "w", "chocolate", "olive", "pink"
    ]

    if 1:  # source-domain plot, with cluster centers drawn as stars (toggle)
        fig = plt.figure(figsize=(6, 6))
        plt.grid(linestyle='dotted')
        plt.scatter(X_tsne[:, 0], X_tsne[:, 1], alpha=0.6, edgecolors="black")

        for c in range(10):
            ind = Xc_tgt == c
            color = colors[c + 1]
            plt.scatter(Xc_tsne[ind][:, 0],
                        Xc_tsne[ind][:, 1],
                        s=250,
                        c=color,
                        edgecolors="black",
                        marker="*")
        plt.xlabel("t-SNE Feature 1")
        plt.ylabel("t-SNE Feature 2")
        title = "Source Dataset ({}) - Center: {} - Adv: {}".format(
            exp_dict["src_dataset"].upper().replace("BIG", ""),
            exp_dict["options"]["center"], exp_dict["options"]["disc"])
        plt.title(title)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        plt.savefig("src_{}.pdf".format(exp_dict["exp_name"].replace(" ", "")),
                    bbox_inches='tight',
                    transparent=False)

        plt.savefig("src_{}.png".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)

    if 1:  # target-domain plot, overlaid with the source cluster centers (toggle)

        fig = plt.figure(figsize=(6, 6))
        plt.grid(linestyle='dotted')
        for c in range(10):
            ind = Y_tgt == c
            color = colors[c + 1]

            plt.scatter(Y_tsne[ind][:, 0],
                        Y_tsne[ind][:, 1],
                        alpha=0.6,
                        c=color,
                        edgecolors="black")

        for c in range(10):
            ind = Xc_tgt == c
            color = colors[c + 1]
            plt.scatter(Xc_tsne[ind][:, 0],
                        Xc_tsne[ind][:, 1],
                        s=350,
                        c=color,
                        edgecolors="black",
                        marker="*")
        plt.xlabel("t-SNE Feature 1")
        plt.ylabel("t-SNE Feature 2")
        title = "Target Dataset ({}) - Center: {} - Adv: {}".format(
            exp_dict["tgt_dataset"].upper().replace("BIG", ""),
            exp_dict["options"]["center"], exp_dict["options"]["disc"])
        plt.title(title)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        plt.savefig("tgt_{}.pdf".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)

        plt.savefig("tgt_{}.png".format(exp_dict["exp_name"]),
                    bbox_inches='tight',
                    transparent=False)
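A design point worth noting: scikit-learn's TSNE has no transform method for out-of-sample points, which is why visEmbed stacks the target points, source points, and cluster centers into one matrix and calls fit_transform once. The split can also be written without the magic -10; a sketch assuming the same arrays as above:

n_y, n_x = Y.shape[0], X.shape[0]
Z = manifold.TSNE(n_components=2, init='pca', random_state=0).fit_transform(
    np.vstack([Y, X, Xc]))
Y_tsne, X_tsne, Xc_tsne = Z[:n_y], Z[n_y:n_y + n_x], Z[n_y + n_x:]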
Code Example #7
def fit_center(src_model,
               tgt_model,
               src_loader,
               tgt_loader,
               opt_tgt,
               epochs=30):
    """Train classifier for source domain."""
    ####################
    # 1. setup network #
    ####################
    #print(type(tgt_model.last.bias.size()[0]))
    n_classes = tgt_model.last.bias.size()[0]

    # set train state for Dropout and BN layers
    src_model.train()
    tgt_model.train()

    src_embeddings, _ = losses.extract_embeddings(src_model, src_loader)

    src_kmeans = KMeans(n_clusters=n_classes)
    src_kmeans.fit(src_embeddings)

    src_centers = torch.FloatTensor(src_kmeans.cluster_centers_).cuda()

    ############################
    # 2. do domain adaptation  #
    ############################

    for epoch in range(epochs):
        for step, (images, labels) in enumerate(tgt_loader):
            # move the batch to the GPU
            images = images.cuda()
            labels = labels.squeeze_().cuda()

            # zero gradients for opt
            opt_tgt.zero_grad()

            # center loss: distance from target embeddings to the source centers
            loss = losses.center_loss(tgt_model, {
                "X": images,
                "y": labels
            }, src_model, src_centers, None, src_kmeans, None)
            # update the target encoder
            loss.backward()
            opt_tgt.step()

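Taken together, the snippets imply this adaptation flow: build loaders and models, adapt the target encoder with fit_center, then score it with validate. A hedged end-to-end sketch reusing only calls shown above (exp_dict keys as in Code Example #6; using the "train" split for the target adaptation loader is an assumption):

src_loader = datasets.get_loader(exp_dict["src_dataset"], "train",
                                 batch_size=exp_dict["src_batch_size"])
tgt_loader = datasets.get_loader(exp_dict["tgt_dataset"], "train",
                                 batch_size=exp_dict["tgt_batch_size"])
tgt_val_loader = datasets.get_loader(exp_dict["tgt_dataset"], "val",
                                     batch_size=exp_dict["tgt_batch_size"])

src_model, src_opt = models.get_model(exp_dict["src_model"], exp_dict["n_outputs"])
src_model.load_state_dict(torch.load(exp_dict["path"] + "/model_src.pth"))
tgt_model, opt_tgt = models.get_model(exp_dict["tgt_model"], exp_dict["n_outputs"])

fit_center(src_model, tgt_model, src_loader, tgt_loader, opt_tgt, epochs=30)
print(validate(src_model, tgt_model, src_loader, tgt_val_loader))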