Example #1
import torch

# prepare_logger, get_loader, LeNet, Discriminator, train_source,
# adapt_target_domain and evaluate are assumed to be imported from the
# surrounding project; their definitions are not part of this snippet.
def main(config):
    logger = prepare_logger(config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # get loaders
    if not config.is_train_source:
        target_loader = get_loader(type="MNIST",
                                   train=False,
                                   batch_size=config.batch_size)

    source_train_loader = get_loader(type="SVHN",
                                     train=True,
                                     batch_size=config.batch_size)
    source_test_loader = get_loader(type="SVHN",
                                    train=False,
                                    batch_size=config.batch_size)

    # build source classifier
    model_src = LeNet(config.num_gpus).to(device)
    if (not config.is_train_source) or config.is_finetune:
        model_src.load_state_dict(torch.load(config.model_dir))

    # train source classifier
    if config.is_train_source:
        logger.info("train source classifier..")
        train_source(model_src, source_train_loader, source_test_loader,
                     config, logger)
        logger.info("evaluate source classifier..")
        logger.info("test accurracy in source domain: %f\n" %
                    (evaluate(model_src, source_test_loader)))

    else:
        # initialize target classifier with source classifier
        model_trg = torch.load(open("./pretrained/lenet-source.pth", "rb"))

        # build discriminator
        D = Discriminator(config.num_gpus).to(device)

        # adaptation process
        logger.info("start adaptation process..")
        adapt_target_domain(D, model_src, model_trg, source_train_loader,
                            target_loader, config)
        logger.info("evaluate target classifier..")
        logger.info("accurracy in target domain: %f\n" %
                    (evaluate(model_trg, target_loader)))
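
The snippet above relies on an evaluate(model, loader) helper that returns a scalar accuracy (it is formatted with %f). Its implementation is not shown; the following is only a minimal sketch of what such a helper typically looks like in PyTorch, and its body is an assumption rather than the project's actual code.

import torch

def evaluate(model, loader):
    # assumed sketch: fraction of correctly classified samples in the loader
    device = next(model.parameters()).device
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    model.train()
    return correct / total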
Example #2
import torch
import torchvision

from torch.utils.tensorboard import SummaryWriter

from models import Encoder, Decoder, Discriminator
from preprocessing import get_loader, inv_standardize
from utils import (Collector, reconstruction_loss_func, norm22)
from config import (knobs, log_dir_local_time, log_dir_last_modified,
                    checkpoints_dir_local_time, checkpoints_dir_last_modified,
                    interpolations_dir)

loader = get_loader()

encoder = Encoder().to(knobs["device"])
decoder = Decoder().to(knobs["device"])
discriminator = Discriminator().to(knobs["device"])

opt_encoder = torch.optim.Adam(encoder.parameters(), lr=knobs["lr_encoder"])
opt_decoder = torch.optim.Adam(decoder.parameters(), lr=knobs["lr_decoder"])
opt_discriminator = torch.optim.Adam(discriminator.parameters(),
                                     lr=knobs["lr_discriminator"])

# collectors for the loss terms and latent-code statistics tracked during training
collector_reconstruction_loss = Collector()
collector_fooling_term = Collector()
collector_error_discriminator = Collector()
collector_heuristic_discriminator = Collector()
collector_codes_min = Collector()
collector_codes_max = Collector()
if knobs["resume"]:
    writer = SummaryWriter(log_dir_last_modified)
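
The Collector objects above come from the project's utils module, whose code is not shown. The sketch below is an assumed minimal running-statistics collector together with a hypothetical SummaryWriter usage; the class body and method names are guesses, not the project's API.

from torch.utils.tensorboard import SummaryWriter

class Collector:
    # assumed sketch: accumulate scalar values and report their mean
    def __init__(self):
        self.values = []

    def append(self, value):
        self.values.append(float(value))

    def mean(self):
        return sum(self.values) / max(len(self.values), 1)

    def reset(self):
        self.values.clear()

# hypothetical usage inside a training loop
writer = SummaryWriter("runs/sketch")          # illustrative log directory
collector_reconstruction_loss = Collector()
for step in range(100):
    loss_value = 1.0 / (step + 1)              # stand-in for a real loss value
    collector_reconstruction_loss.append(loss_value)
writer.add_scalar("reconstruction_loss",
                  collector_reconstruction_loss.mean(), global_step=100)
collector_reconstruction_loss.reset()
writer.close()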
Example #3
import numpy as np
import tensorflow as tf

from train import Trainer
from config import get_config
from preprocessing import get_loader, prepare_dirs_and_logger

# parse known command-line flags; leftover arguments are returned separately
config, unparsed = get_config()

# create output/log directories and set up logging
prepare_dirs_and_logger(config)

loader = get_loader(config.data_dir, config.batch_size)
trainer = Trainer(config, loader)
trainer.train()
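
get_config() here returns a (config, unparsed) pair, which suggests it wraps argparse's parse_known_args(). Below is a minimal sketch under that assumption; the actual flags live in the project's config module, and only data_dir and batch_size are clearly used by the snippet, so the defaults shown are hypothetical.

import argparse

def get_config():
    # assumed sketch of an argparse-based config loader
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="data")
    parser.add_argument("--batch_size", type=int, default=16)
    # parse_known_args() returns the recognized flags plus any leftover
    # arguments, matching the (config, unparsed) pair unpacked above
    config, unparsed = parser.parse_known_args()
    return config, unparsed

if __name__ == "__main__":
    config, unparsed = get_config()
    print(config.data_dir, config.batch_size, unparsed)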