Example #1
# In[4]:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# prepare model
# init models
src_encoder = LeNetEncoder()
tgt_encoder = LeNetEncoder()

# to device
src_encoder.to(device)
tgt_encoder.to(device)

# init weights
src_encoder.load_state_dict(
    torch.load(output_src_encoder_path, map_location=device))

tgt_encoder.load_state_dict(
    torch.load(output_tgt_encoder_path, map_location=device))

# In[5]:

# get latent space
latents = []
labels = []
domains = []

src_encoder.eval()
tgt_encoder.eval()

for sources in tqdm(source_dataloader):
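    # NOTE: the excerpt ends here; the loop body below is only a hedged
    # sketch, assuming source_dataloader yields (image, label) batches.
    source_images, source_labels = sources
    source_images = source_images.to(device)

    with torch.no_grad():
        source_latents = src_encoder(source_images)

    latents.append(source_latents.cpu())
    labels.append(source_labels)
    # mark these samples as coming from the source domain (domain label 0)
    domains.append(torch.zeros(len(source_labels), dtype=torch.long))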
Example #2
target_encoder = LeNetEncoder()

# Select model that was trained on the correct dataset
if opt.source == "SVHN":
    target_encoder_name = "ADDA-target-encoder-SVHN_MNISTM.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)

elif opt.source == "MNISTM":
    target_encoder_name = "ADDA-target-encoder-MNISTM_SVHN.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)

else:
    # fail early instead of hitting a NameError on target_encoder_pth below
    raise ValueError("Unsupported source dataset: {}".format(opt.source))

# Load model
if os.path.exists(target_encoder_pth):
    print("---> found previously saved {}, loading checkpoint...".format(
        target_encoder_name))
    target_encoder.load_state_dict(torch.load(target_encoder_pth))
else:
    print("Error: target encoder not loaded")

# Move to GPU
if cuda:
    target_encoder = target_encoder.cuda()

# ----------
#  Features
# ----------

print("---> generating features...")

target_encoder.eval()
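
# NOTE: the excerpt ends here; the loop below is only a hedged sketch,
# assuming a target dataloader yielding (image, label) batches is in scope.
features, labels = [], []
with torch.no_grad():
    for imgs, lbls in dataloader:
        if cuda:
            imgs = imgs.cuda()
        features.append(target_encoder(imgs).cpu())
        labels.append(lbls)
features = torch.cat(features)
labels = torch.cat(labels)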
Example #3
# init source and target models
src_encoder = LeNetEncoder().to(DEVICE)
src_classifier = LeNetClassifier().to(DEVICE)

tgt_encoder = LeNetEncoder().to(DEVICE)
discriminator = Discriminator(input_dims=500, hidden_dims=500,
                              output_dims=2).to(DEVICE)

# In[5]:

# Print the source encoder and classifier
print(src_encoder)
print(src_classifier)

# In[6]:

try:
    src_encoder.load_state_dict(torch.load('src_encoder.pth'))
    src_classifier.load_state_dict(torch.load('src_classifier.pth'))
except FileNotFoundError:
    pretrain.train_src(src_encoder, src_classifier, source_train_loader,
                       DEVICE)

# In[7]:

# Evaluate the pretrained source model on the source training set
pretrain.eval_src(src_encoder, src_classifier, source_train_loader, DEVICE)

# In[8]:

# Evaluate the pretrained source model on the target test set (source-only baseline)
pretrain.eval_src(src_encoder, src_classifier, target_test_loader, DEVICE)
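
# NOTE: the notebook excerpt stops after the source-only evaluation. The cell
# below is only a hedged sketch of one pass of ADDA's adversarial adaptation,
# assuming a target_train_loader exists; it is not the notebook's actual code.
criterion = torch.nn.CrossEntropyLoss()
d_optim = torch.optim.Adam(discriminator.parameters(), lr=1e-4)
g_optim = torch.optim.Adam(tgt_encoder.parameters(), lr=1e-4)

# start the target encoder from the pretrained source encoder weights
tgt_encoder.load_state_dict(src_encoder.state_dict())

for (src_imgs, _), (tgt_imgs, _) in zip(source_train_loader, target_train_loader):
    src_imgs, tgt_imgs = src_imgs.to(DEVICE), tgt_imgs.to(DEVICE)

    # 1) train the discriminator to separate source (0) from target (1) features
    d_optim.zero_grad()
    feats = torch.cat([src_encoder(src_imgs), tgt_encoder(tgt_imgs)]).detach()
    domain_labels = torch.cat([torch.zeros(len(src_imgs)),
                               torch.ones(len(tgt_imgs))]).long().to(DEVICE)
    d_loss = criterion(discriminator(feats), domain_labels)
    d_loss.backward()
    d_optim.step()

    # 2) train the target encoder to fool the discriminator (labels flipped to 0)
    g_optim.zero_grad()
    fool_labels = torch.zeros(len(tgt_imgs), dtype=torch.long, device=DEVICE)
    g_loss = criterion(discriminator(tgt_encoder(tgt_imgs)), fool_labels)
    g_loss.backward()
    g_optim.step()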
Example #4
# eval model on target
eval_trange = tqdm(val_tgt_dataloader)
eval_tgt_loss, eval_tgt_corrects = eval_src_only_one_epoch(eval_trange)
            
eval_epoch_tgt_loss = eval_tgt_loss / len(val_tgt_dataset)
eval_epoch_tgt_acc = eval_tgt_corrects / len(val_tgt_dataset)
print("Val | Tgt Loss: {:.5f} | Tgt Accuracy: {:.5f}".format(eval_epoch_tgt_loss, eval_epoch_tgt_acc))
acc_arr.append(eval_epoch_tgt_acc)

best_tgt_acc = eval_epoch_tgt_acc
best_tgt_encoder = best_src_encoder
best_discriminator = None
    
# init target encoder with source encoder
if best_src_encoder is not None:
    tgt_encoder.load_state_dict(best_src_encoder)
    
print("### [Info] Training target encoder by GAN ###")
for epoch in range(tgt_num_epochs):
    print("Epoch {}/{}".format(epoch + 1, tgt_num_epochs))
    
    # train model on target
    tgt_encoder.train()
    discriminator.train()    
    trange = tqdm(zip(src_dataloader, tgt_dataloader), total=_len)
    running_loss_tgt, running_loss_discriminator = train_tgt_one_epoch(trange)

    # eval model on target
    tgt_encoder.eval()
    discriminator.eval()
    eval_trange = tqdm(val_tgt_dataloader)
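    # NOTE: the excerpt ends here; the rest of the epoch below is only a
    # hedged sketch. eval_tgt_one_epoch is a hypothetical counterpart of
    # eval_src_only_one_epoch that classifies val_tgt_dataset through the
    # adapted tgt_encoder, and `import copy` is assumed at the top of the file.
    eval_tgt_loss, eval_tgt_corrects = eval_tgt_one_epoch(eval_trange)
    eval_epoch_tgt_loss = eval_tgt_loss / len(val_tgt_dataset)
    eval_epoch_tgt_acc = eval_tgt_corrects / len(val_tgt_dataset)
    print("Val | Tgt Loss: {:.5f} | Tgt Accuracy: {:.5f}".format(
        eval_epoch_tgt_loss, eval_epoch_tgt_acc))
    acc_arr.append(eval_epoch_tgt_acc)

    # keep the best adapted encoder / discriminator seen so far
    if eval_epoch_tgt_acc > best_tgt_acc:
        best_tgt_acc = eval_epoch_tgt_acc
        best_tgt_encoder = copy.deepcopy(tgt_encoder.state_dict())
        best_discriminator = copy.deepcopy(discriminator.state_dict())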
Example #5
import pandas as pd
import torch
from torch.utils.data import DataLoader

# ReproduceDataset, LeNetEncoder and LeNetClassifier come from the project's
# own modules; their import lines are omitted in this excerpt.


def main(args):
    # read from args
    test_path = args.test_path
    d_target = args.d_target
    output_predict_path = args.output_predict_path

    ########## Arguments ##########
    batch_size = 128

    # map each target domain (mnistm, svhn, usps) to its paired source domain
    if d_target == "mnistm":
        d_source = "usps"
    elif d_target == "svhn":
        d_source = "mnistm"
    else:
        d_source = "svhn"

    output_src_classifier_path = "./hw3-4/models/src_classifier_{}_{}.pth".format(
        d_source, d_target)
    output_tgt_encoder_path = "./hw3-4/models/tgt_encoder_{}_{}.pth".format(
        d_source, d_target)

    #############################

    dataset = ReproduceDataset(test_path)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # init models
    tgt_encoder = LeNetEncoder()
    src_classifier = LeNetClassifier()

    # to device
    tgt_encoder.to(device)
    src_classifier.to(device)

    # init weights
    tgt_encoder.load_state_dict(torch.load(
        output_tgt_encoder_path, map_location=device))

    src_classifier.load_state_dict(torch.load(
        output_src_classifier_path, map_location=device))

    tgt_encoder.eval()
    src_classifier.eval()

    all_pred = []

    for idx, targets in enumerate(dataloader):
        target_images = targets.to(device)
        target_bs = target_images.shape[0]

        with torch.no_grad():
            preds = src_classifier(tgt_encoder(target_images))

        # take the argmax as the predicted label
        _, pred_labels = torch.max(preds, 1)
        all_pred.append(pred_labels)

    # save to predict
    pred = torch.cat(all_pred).cpu().numpy()
    image_names = ['{:05}.png'.format(i) for i in range(len(pred))]

    pd.DataFrame({
        'image_name': image_names,
        'label': pred
    }).to_csv(output_predict_path, index=False)
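
# NOTE: a hedged sketch of the missing entry point; the flag names mirror the
# attributes read in main() but are an assumption, not the original argparse code.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--test_path", type=str, required=True)
    parser.add_argument("--d_target", type=str, required=True,
                        choices=["svhn", "usps", "mnistm"])
    parser.add_argument("--output_predict_path", type=str, required=True)
    main(parser.parse_args())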