Exemplo n.º 1
0
def run():
    """Adapt the target-domain encoder to the source domain via ADDA's GAN step."""
    # Data loaders for the two domains.
    loader_src = get_data_loader(params.src_dataset)
    loader_tgt = get_data_loader(params.tgt_dataset)

    # Restore (or freshly initialise) the three networks involved.
    encoder_src = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    encoder_tgt = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    discriminator = init_model(Discriminator(input_dims=params.d_input_dims,
                                             hidden_dims=params.d_hidden_dims,
                                             output_dims=params.d_output_dims),
                               restore=params.d_model_restore)

    # Show the architectures being trained; a sample batch fixes the input size.
    print("=== Training encoder for target domain ===")
    print(">>> Target Encoder <<<")
    sample_batch, _ = next(iter(loader_tgt))
    summary(encoder_tgt, input_size=sample_batch[0].size())
    print(">>> Critic <<<")
    print(discriminator)

    # Warm-start the target encoder from the source weights unless restored.
    if not encoder_tgt.restored:
        encoder_tgt.load_state_dict(encoder_src.state_dict())

    # Run adversarial training only when no fully trained target model exists.
    needs_training = not (encoder_tgt.restored and discriminator.restored
                          and params.tgt_model_trained)
    if needs_training:
        encoder_tgt = train_tgt(encoder_src, encoder_tgt, discriminator,
                                loader_src, loader_tgt)
Exemplo n.º 2
0
def run():
    """Pre-train and evaluate the source-domain encoder and classifier."""
    # Source dataloaders: train split for fitting, test split for evaluation.
    loader_train = get_data_loader(params.src_dataset)
    loader_eval = get_data_loader(params.src_dataset, train=False)

    # Restore networks from checkpoints when available.
    encoder = init_model(net=LeNetEncoder(),
                         restore=params.src_encoder_restore)
    classifier = init_model(net=LeNetClassifier(),
                            restore=params.src_classifier_restore)

    # Show the architectures; a sample batch fixes the summary input size.
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    sample_batch, _ = next(iter(loader_train))
    summary(encoder, input_size=sample_batch[0].size())
    print(">>> Source Classifier <<<")
    print(classifier)

    # Train only when no fully trained checkpoint pair was restored.
    if not (encoder.restored and classifier.restored and
            params.src_model_trained):
        encoder, classifier = train_src(encoder, classifier, loader_train)

    # Report accuracy on the held-out source test split.
    print("=== Evaluating classifier for source domain ===")
    eval_src(encoder, classifier, loader_eval)
Exemplo n.º 3
0
def office():
    """Run the full ADDA pipeline and return target-domain accuracy."""
    init_random_seed(params.manual_seed)

    # Dataloaders: train splits for adaptation, test splits for evaluation.
    loader_src_train = get_data_loader(params.src_dataset)
    loader_src_eval = get_data_loader(params.src_dataset, train=False)
    loader_tgt_train = get_data_loader(params.tgt_dataset)
    loader_tgt_eval = get_data_loader(params.tgt_dataset, train=False)

    # Networks, restored from checkpoints when available.
    encoder_src = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    classifier_src = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    encoder_tgt = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    discriminator = init_model(Discriminator(input_dims=params.d_input_dims,
                                             hidden_dims=params.d_hidden_dims,
                                             output_dims=params.d_output_dims),
                               restore=params.d_model_restore)

    # Stage 1: supervised pre-training on the source domain, skipped when
    # both checkpoints were restored and params marks the model trained.
    if not (encoder_src.restored and classifier_src.restored and
            params.src_model_trained):
        encoder_src, classifier_src = train_src(
            encoder_src, classifier_src, loader_src_train)

    # Source-model evaluation is intentionally disabled in this variant
    # (the eval loader is still created above for parity of side effects).

    # Stage 2: adversarial adaptation of the target encoder.
    if not encoder_tgt.restored:
        # Warm-start from the source encoder weights.
        encoder_tgt.load_state_dict(encoder_src.state_dict())

    if not (encoder_tgt.restored and discriminator.restored and
            params.tgt_model_trained):
        encoder_tgt = train_tgt(encoder_src, encoder_tgt, discriminator,
                                loader_src_train, loader_tgt_train)

    # Stage 3: score the adapted encoder + source classifier on the
    # target test split and hand the accuracy back to the caller.
    print(">>> domain adaption <<<")
    return eval_tgt(encoder_tgt, classifier_src, loader_tgt_eval)
Exemplo n.º 4
0
def run():
    """Compare source-only vs. domain-adapted models on the target test set."""
    # Target-domain test dataloader.
    loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # Restore the three networks needed for the comparison.
    encoder_src = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    classifier = init_model(net=LeNetClassifier(),
                            restore=params.src_classifier_restore)
    encoder_tgt = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)

    # Baseline (source encoder) first, then the ADDA-adapted target encoder.
    print("=== Evaluating classifier for encoded target domain ===")
    print(">>> source only <<<")
    eval_tgt(encoder_src, classifier, loader_eval)
    print(">>> domain adaption <<<")
    eval_tgt(encoder_tgt, classifier, loader_eval)
Exemplo n.º 5
0
def experiments(exp):
    """Run the full ADDA pipeline for one ``"source_target"`` experiment string."""
    # Seed all RNGs for reproducibility.
    init_random_seed(params.manual_seed)

    # e.g. "mnist_usps" -> source "mnist", target "usps".
    src_name, tgt_name = exp.split('_')
    loader_src = get_data_loader(src_name)
    loader_src_eval = get_data_loader(src_name, train=False)

    loader_tgt = get_data_loader(tgt_name)
    loader_tgt_eval = get_data_loader(tgt_name, train=False)

    # Networks; init_model tags checkpoints with the experiment name.
    encoder_src = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore,
                             exp=exp)
    classifier_src = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore,
                                exp=exp)
    encoder_tgt = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore,
                             exp=exp)
    discriminator = init_model(Discriminator(input_dims=params.d_input_dims,
                                             hidden_dims=params.d_hidden_dims,
                                             output_dims=params.d_output_dims),
                               exp=exp,
                               restore=params.d_model_restore)

    # --- Stage 1: supervised pre-training on the source domain -----------
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    print(encoder_src)
    print(">>> Source Classifier <<<")
    print(classifier_src)

    if not (encoder_src.restored and classifier_src.restored
            and params.src_model_trained):
        encoder_src, classifier_src = train_src(exp, encoder_src,
                                                classifier_src,
                                                loader_src,
                                                loader_src_eval)

    print("=== Evaluating classifier for source domain ===")
    evaluation(encoder_src, classifier_src, loader_src_eval)

    # --- Stage 2: adversarial adaptation of the target encoder -----------
    print("=== Training encoder for target domain ===")
    print(">>> Target Encoder <<<")
    print(encoder_tgt)
    print(">>> Critic <<<")
    print(discriminator)

    # Warm-start the target encoder with source weights unless restored.
    if not encoder_tgt.restored:
        encoder_tgt.load_state_dict(encoder_src.state_dict())

    if not (encoder_tgt.restored and discriminator.restored
            and params.tgt_model_trained):
        encoder_tgt = train_tgt(exp, encoder_src, encoder_tgt, discriminator,
                                classifier_src, loader_src,
                                loader_tgt, loader_tgt_eval)

    # --- Stage 3: final evaluation on the target test split ---------------
    print("=== Evaluating classifier for encoded target domain ===")
    print(">>> source only <<<")
    evaluation(encoder_src, classifier_src, loader_tgt_eval)
    print(">>> domain adaption <<<")
    evaluation(encoder_tgt, classifier_src, loader_tgt_eval)
Exemplo n.º 6
0
# Target-domain test split (ImageDataset, d_target, source_dataset and the
# output_*_path variables come from earlier in this notebook/script).
target_dataset = ImageDataset("test", d_target)

# Evaluation loaders: shuffle=False keeps predictions aligned with sample order.
source_dataloader = DataLoader(source_dataset,
                               batch_size=batch_size,
                               shuffle=False)
target_dataloader = DataLoader(target_dataset,
                               batch_size=batch_size,
                               shuffle=False)

# In[4]:

# Run on GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# prepare model
# init models
src_encoder = LeNetEncoder()
tgt_encoder = LeNetEncoder()

# to device
src_encoder.to(device)
tgt_encoder.to(device)

# init weights
# map_location lets CPU-only machines load checkpoints that were saved on GPU.
src_encoder.load_state_dict(
    torch.load(output_src_encoder_path, map_location=device))

tgt_encoder.load_state_dict(
    torch.load(output_tgt_encoder_path, map_location=device))

# In[5]:
Exemplo n.º 7
0
from core import eval_src, eval_tgt, train_src, train_tgt
from models import Discriminator, LeNetClassifier, LeNetEncoder
from utils import get_data_loader, init_model, init_random_seed

if __name__ == '__main__':
    # init random seed
    init_random_seed(params.manual_seed)

    # load dataset
    # Train splits drive training; the *_eval loaders hold the test splits.
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_eval = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # load models
    # init_model restores weights from the given checkpoint path when present.
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train source model
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    print(src_encoder)
    print(">>> Source Classifier <<<")
    # NOTE(review): this excerpt appears truncated here — the classifier
    # print and the training/evaluation stages continue beyond this chunk.
Exemplo n.º 8
0
print("---> preparing dataloaders...")

# Held-out test loaders for both domains.  NOTE(review): the "MNISTM" source
# branch evaluates on plain "MNIST" — presumably how get_data_loader names
# the dataset here; confirm against its implementation.
if opt.source == "MNISTM":
    dataloader_source_test = get_data_loader("MNIST", train=False)
    dataloader_target_test = get_data_loader("SVHN", train=False)
elif opt.source == "SVHN":
    dataloader_source_test = get_data_loader("SVHN", train=False)
    dataloader_target_test = get_data_loader("MNIST", train=False)

# ------------
#  Load Model
# ------------

print("---> preparing model...")

target_encoder = LeNetEncoder()

# Select model that was trained on the correct dataset
# NOTE(review): if opt.source is neither "SVHN" nor "MNISTM", neither the
# loaders above nor target_encoder_pth get bound, and the os.path.exists
# check below raises NameError.
if opt.source == "SVHN":
    target_encoder_name = "ADDA-target-encoder-SVHN_MNISTM.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)

elif opt.source == "MNISTM":
    target_encoder_name = "ADDA-target-encoder-MNISTM_SVHN.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)

# Load model
# Best-effort restore: a missing checkpoint is skipped silently, leaving
# the encoder with its fresh constructor initialisation.
if os.path.exists(target_encoder_pth):
    print("---> found previously saved {}, loading checkpoint...".format(
        target_encoder_name))
    target_encoder.load_state_dict(torch.load(target_encoder_pth))
Exemplo n.º 9
0
if __name__ == '__main__':
    # init random seed

    # Restrict CUDA to the GPU chosen in params before anything touches it.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(params.num_gpu)

    init_random_seed(params.manual_seed)

    # load dataset
    # Train splits for training; *_eval loaders carry the test splits.
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_eval = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # load models
    # init_model restores weights from the given checkpoint path when present.
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train source model
    # NOTE(review): print_log vs plain print — presumably print_log also
    # writes to a log file; confirm against its definition.
    print_log("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    print(src_encoder)
    print(">>> Source Classifier <<<")
    # NOTE(review): excerpt appears truncated here; the script continues.
Exemplo n.º 10
0
    # Interior of a setup routine — the enclosing `def`, plus `logs_path`
    # and `cfg`, are defined outside this excerpt.
    logger = Logger(logs_path)

    # load dataset SM
    # src_data_loader = get_data_loader(cfg.src_dataset)
    # src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    # tgt_data_loader = get_data_loader(cfg.tgt_dataset)
    # tgt_data_loader_eval = get_data_loader(cfg.tgt_dataset, train=False)

    # load dataset UM MU
    src_data_loader = get_data_loader(cfg.src_dataset, sample=True)
    src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    tgt_data_loader = get_data_loader(cfg.tgt_dataset, sample=True)
    # NOTE(review): the target "eval" loader aliases the sampled *training*
    # loader, so target evaluation runs on training data — confirm intended.
    tgt_data_loader_eval = tgt_data_loader

    # load models
    src_encoder = init_model(net=LeNetEncoder(cfg.inputc),
                             restore=cfg.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(ncls=cfg.ncls),
                                restore=cfg.src_classifier_restore)
    # NOTE(review): the target classifier restores from the *source*
    # classifier checkpoint — presumably a deliberate warm start; verify.
    tgt_classifier = init_model(net=LeNetClassifier(ncls=cfg.ncls),
                                restore=cfg.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(cfg.inputc),
                             restore=cfg.tgt_encoder_restore)
    critic = init_model(Discriminator_feat(input_dims=cfg.d_input_dims,
                                           hidden_dims=cfg.d_hidden_dims,
                                           output_dims=cfg.d_output_dims),
                        restore=cfg.d_model_restore)
    generator = init_model(net=LeNetGenerator(input_dims=cfg.g_input_dims,
                                              outputc=cfg.inputc),
                           restore=cfg.src_generator_restore)
    # generator = init_model(net=LeNetGenerator(input_dims=cfg.g_input_dims, outputc = cfg.inputc),
Exemplo n.º 11
0
        # Interior of a model-selection branch — the `if`/`elif` header and
        # earlier branches lie outside this excerpt.  NOTE(review): the "*"
        # in these paths is presumably glob-expanded by the restore helper;
        # confirm against init_model.
        src_encoder_restore = "snapshots/USPS2MNI*/ADDA-source-encoder-final.pt"
        src_classifier_restore = "snapshots/USPS2MNI*/ADDA-source-classifier-final.pt"
        src_model_trained = True
        tgt_dataset = "MNIST"
        tgt_encoder_restore = "snapshots/USPS2MNI*/ADDA-target-encoder-final.pt"
        tgt_model_trained = True
        d_model_restore = "snapshots/USPS2MNI*/ADDA-critic-final.pt"
        print("LOAD mnist MODEL OK")
    ###
    # Build loaders over the user-supplied test path for both domains
    # (src_dataset, testpath and csv_output_path are bound outside this view).
    src_data_loader = get_data_loader(src_dataset, testpath)
    src_data_loader_eval = get_data_loader(src_dataset, testpath, train=False)
    tgt_data_loader = get_data_loader(tgt_dataset, testpath)
    tgt_data_loader_eval = get_data_loader(tgt_dataset, testpath, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(), restore=src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=src_classifier_restore)
    tgt_encoder = init_model(
        net=LeNetEncoder(),
        restore=tgt_encoder_restore)  #source and target will be the same one
    # NOTE(review): the critic is restored but never used in the visible code.
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        restore=d_model_restore)

    # eval target encoder on test set of target dataset
    print("START PREDICT..................................................")
    eval_tgt_hw(tgt_encoder, src_classifier, tgt_data_loader_eval,
                csv_output_path)
Exemplo n.º 12
0
# Dataloaders: shuffle the training splits, keep validation order fixed.
# (The *_dataset objects and batch_size are defined earlier in the file.)
src_dataloader = DataLoader(src_dataset, batch_size=batch_size, shuffle=True)
tgt_dataloader = DataLoader(tgt_dataset, batch_size=batch_size, shuffle=True)
val_src_dataloader = DataLoader(val_src_dataset, batch_size=batch_size, shuffle=False)
val_tgt_dataloader = DataLoader(val_tgt_dataset, batch_size=batch_size, shuffle=False)

# Domain labels (0 = source, 1 = target) — presumably fed to the
# discriminator during adversarial training; confirm against the train loop.
src_label = 0
tgt_label = 1

print(len(src_dataset), len(tgt_dataset), len(val_src_dataset), len(val_tgt_dataset))


# In[4]:


# init models
src_encoder = LeNetEncoder()
tgt_encoder = LeNetEncoder()
src_classifier = LeNetClassifier()
discriminator = Discriminator()

# init weights
# nn.Module.apply runs init_weights recursively over every submodule.
src_encoder.apply(init_weights)
tgt_encoder.apply(init_weights)
src_classifier.apply(init_weights)
discriminator.apply(init_weights)

# to device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
src_encoder.to(device)
tgt_encoder.to(device)
src_classifier.to(device)
# NOTE(review): excerpt appears truncated — discriminator.to(device)
# presumably follows beyond this chunk.
Exemplo n.º 13
0
# In[2]:

# MNIST is the source domain, USPS the target (train/test loader pairs).
source_train_loader, source_test_loader = utils.load_data("MNIST")
target_train_loader, target_test_loader = utils.load_data("USPS")
batch_size = 256

# In[3]:

# Peek at one target-domain test batch to inspect the tensor dimensions.
images, labels = next(iter(target_test_loader))
images.size()

# In[4]:

# Pin everything to the second GPU.
DEVICE = 'cuda:1'
#Load Models
src_encoder = LeNetEncoder().to(DEVICE)
src_classifier = LeNetClassifier().to(DEVICE)

tgt_encoder = LeNetEncoder().to(DEVICE)
discriminator = Discriminator(input_dims=500, hidden_dims=500,
                              output_dims=2).to(DEVICE)

# In[5]:

#Print models for source
print(src_encoder)
print(src_classifier)

# In[6]:

# NOTE(review): excerpt is truncated mid-statement — the body of this
# `try` continues beyond the visible chunk.
try:
Exemplo n.º 14
0
def main(args):
    """Predict labels for a target-domain test set with an ADDA-adapted model.

    Loads the target encoder and source classifier checkpoints matching
    ``args.d_target``, runs inference over the images found at
    ``args.test_path`` and writes ``image_name,label`` rows to
    ``args.output_predict_path``.

    Raises whatever ``torch.load`` raises when a checkpoint is missing.
    """
    # read from args
    test_path = args.test_path
    d_target = args.d_target
    output_predict_path = args.output_predict_path

    ########## Arguments ##########
    batch_size = 128

    # Fixed adaptation pairs over {svhn, usps, mnistm}: usps->mnistm,
    # mnistm->svhn, and svhn as the source for every other target.
    if d_target == "mnistm":
        d_source = "usps"
    elif d_target == "svhn":
        d_source = "mnistm"
    else:
        d_source = "svhn"

    output_src_classifier_path = "./hw3-4/models/src_classifier_{}_{}.pth".format(
        d_source, d_target)
    # NOTE(review): the double slash below is in the original checkpoint
    # path; kept byte-identical since the filesystem treats "//" as "/".
    output_tgt_encoder_path = "./hw3-4//models/tgt_encoder_{}_{}.pth".format(
        d_source, d_target)

    #############################

    # Test images only; no shuffling so predictions stay index-aligned.
    dataset = ReproduceDataset(test_path)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # init models
    tgt_encoder = LeNetEncoder()
    src_classifier = LeNetClassifier()

    # to device
    tgt_encoder.to(device)
    src_classifier.to(device)

    # init weights (map_location lets CPU machines load GPU checkpoints)
    tgt_encoder.load_state_dict(torch.load(
        output_tgt_encoder_path, map_location=device))
    src_classifier.load_state_dict(torch.load(
        output_src_classifier_path, map_location=device))

    tgt_encoder.eval()
    src_classifier.eval()

    # Inference loop.  no_grad() is hoisted around the whole loop instead of
    # re-entered per batch; the unused loop index (`idx`) and batch-size
    # local (`target_bs`) from the original are dropped.
    all_pred = []
    with torch.no_grad():
        for targets in dataloader:
            target_images = targets.to(device)
            preds = src_classifier(tgt_encoder(target_images))
            # argmax over the class dimension gives the predicted labels.
            all_pred.append(preds.argmax(dim=1))

    # save to predict
    pred = torch.cat(all_pred).cpu().numpy()
    image_names = ['{:05}.png'.format(i) for i in range(len(pred))]

    pd.DataFrame({
        'image_name': image_names,
        'label': pred
    }).to_csv(output_predict_path, index=False)