os.environ["CUDA_VISIBLE_DEVICES"] = "6"

# Parameters
batch_size = 15
data_dir = '/home/lucliu/dataset/domain_adaptation/office31'
src_dir = 'amazon'
tgt_dir = 'webcam'
#tgt_dir = 'dslr'
test_dir = 'test'
cuda = torch.cuda.is_available()
test_loader = get_dataloader(data_dir, tgt_dir, batch_size=15, train=False)

# load the pretrained and fine-tuned AlexNet-based model
encoder = Encoder()
classifier = ClassClassifier(num_classes=31)

encoder.load_state_dict(
    torch.load('./checkpoints/a2w/no_soft_encoder6000.pth'))
classifier.load_state_dict(
    torch.load('./checkpoints/a2w/no_soft_class_classifier6000.pth'))

criterion = nn.CrossEntropyLoss()

if cuda:
    encoder = encoder.cuda()
    classifier = classifier.cuda()
    criterion = criterion.cuda()

encoder.eval()
classifier.eval()
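
# A minimal evaluation sketch to go with the setup above (the helper name `evaluate`
# is not from the original source).  It assumes get_dataloader yields (image, label)
# batches and that the classifier returns raw logits.
def evaluate(encoder, classifier, loader):
    correct, total, loss_sum = 0, 0, 0.0
    with torch.no_grad():
        for images, labels in loader:
            if cuda:
                images, labels = images.cuda(), labels.cuda()
            logits = classifier(encoder(images))
            loss_sum += criterion(logits, labels).item() * labels.size(0)
            correct += (logits.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    print('avg loss {:.4f}, accuracy {:.2%}'.format(loss_sum / total, correct / total))


evaluate(encoder, classifier, test_loader)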
os.environ["CUDA_VISIBLE_DEVICES"] = "5"

num_classes = 31
# Parameters
batch_size = 15
data_dir = '/home/lucliu/dataset/domain_adaptation/office31'
src_dir = 'amazon'
#tgt_dir = 'webcam'
tgt_dir = 'dslr'
test_dir = 'test'
cuda = torch.cuda.is_available()
test_loader = get_dataloader(data_dir, tgt_dir, batch_size=15, train=False)

# load the pretrained and fine-tuned AlexNet-based model
encoder = Encoder()
classifier = ClassClassifier(num_classes=num_classes)

encoder.load_state_dict(torch.load('./checkpoints/a2w/src_encoder_final.pth'))
classifier.load_state_dict(
    torch.load('./checkpoints/a2w/src_classifier_final.pth'))

criterion = nn.CrossEntropyLoss()

if cuda:
    encoder = encoder.cuda()
    classifier = classifier.cuda()
    criterion = criterion.cuda()

encoder.eval()
classifier.eval()
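
# The evaluate() sketch defined after the previous snippet applies here unchanged,
# e.g.: evaluate(encoder, classifier, test_loader)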
# Begin adaptation training setup
data_dir = '/home/lucliu/dataset/domain_adaptation/office31'
src_dir = 'amazon'
tgt_train_dir = 'dslr_tgt'
tgt_dir = 'webcam'
test_dir = 'test'
cuda = torch.cuda.is_available()
batch_size = 15
test_loader = get_dataloader(data_dir, tgt_dir, batch_size=batch_size, train=False)
# lam weights the (domain-)confusion term
lam = 0.01
# nu weights the soft-label term
nu = 0.1

# load the pretrained and fine-tuned AlexNet-based model

encoder = Encoder()
cl_classifier = ClassClassifier(num_classes=31)
dm_classifier = DomainClassifier()

encoder.load_state_dict(torch.load('./checkpoints/a2w/src_encoder_final.pth'))
cl_classifier.load_state_dict(torch.load('./checkpoints/a2w/src_classifier_final.pth'))

src_train_loader = get_dataloader(data_dir, src_dir, batch_size, train=True)
tgt_train_loader = get_dataloader(data_dir, tgt_train_dir, batch_size, train=True)
criterion = nn.CrossEntropyLoss()
# criterion_kl = nn.KLDivLoss()
if cuda:
    criterion = criterion.cuda()
    cl_classifier = cl_classifier.cuda()
    dm_classifier = dm_classifier.cuda()
    encoder = encoder.cuda()
#soft_labels = gen_soft_labels(31, src_train_loader, encoder, cl_classifier)
interval = 100
epochs = 1000
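
# A hedged sketch of what one adaptation step could look like with the pieces set up
# above -- this is not the original training loop.  The source batch drives the class
# loss, the domain classifier learns to separate source from target features, and lam
# weights a confusion term that pushes the encoder toward domain-indistinguishable
# features.  Assumptions: DomainClassifier(features) returns 2-way domain logits, the
# loaders yield (image, label) batches, and the optimizer hyperparameters are
# placeholders; the nu-weighted soft-label term is omitted because gen_soft_labels is
# commented out above.  optimizer_enc, optimizer_dm and adapt_step are not names from
# the original source.
optimizer_enc = optim.SGD(list(encoder.parameters()) + list(cl_classifier.parameters()),
                          lr=1e-3, momentum=0.9)   # placeholder hyperparameters
optimizer_dm = optim.SGD(dm_classifier.parameters(), lr=1e-3, momentum=0.9)


def adapt_step(src_batch, tgt_batch):
    (src_x, src_y), (tgt_x, _) = src_batch, tgt_batch
    if cuda:
        src_x, src_y, tgt_x = src_x.cuda(), src_y.cuda(), tgt_x.cuda()
    src_feat = encoder(src_x)
    tgt_feat = encoder(tgt_x)
    feats = torch.cat([src_feat, tgt_feat], dim=0)
    domain_y = torch.cat([torch.zeros(src_x.size(0)), torch.ones(tgt_x.size(0))]).long()
    if cuda:
        domain_y = domain_y.cuda()

    # 1) update the domain classifier to tell source and target features apart
    loss_dm = criterion(dm_classifier(feats.detach()), domain_y)
    optimizer_dm.zero_grad()
    loss_dm.backward()
    optimizer_dm.step()

    # 2) update encoder + class classifier: source classification plus lam * confusion,
    #    where the confusion term pushes the domain prediction toward uniform
    loss_cls = criterion(cl_classifier(src_feat), src_y)
    loss_conf = -torch.log_softmax(dm_classifier(feats), dim=1).mean()
    loss = loss_cls + lam * loss_conf
    optimizer_enc.zero_grad()
    loss.backward()
    optimizer_enc.step()
    return loss_cls.item(), loss_dm.item()

# A full run would iterate over zip(src_train_loader, tgt_train_loader) for `epochs`
# steps, calling adapt_step on each pair and logging/evaluating every `interval` steps.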
data_dir = '/home/lucliu/dataset/domain_adaptation/office31'
src_dir = 'amazon'
#src_dir = 'webcam'
cuda = torch.cuda.is_available()
# Hyperparameters: batch_size matches the other snippets; lr, momentum and
# weight_decay are assumed placeholder values, not taken from the original source.
batch_size = 15
lr = 1e-3
momentum = 0.9
weight_decay = 5e-4
# dataloader
src_train_loader = get_dataloader(data_dir, src_dir, batch_size, train=True)
# model
# Pretrained Model
alexnet = torchvision.models.alexnet(pretrained=True)
pretrained_dict = alexnet.state_dict()
# Train on the source domain
# Model parameters
src_encoder = Encoder()
src_classifier = ClassClassifier(num_classes=31)
src_encoder_dict = src_encoder.state_dict()
# Load pretrained model 
# filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in src_encoder_dict}
# overwrite entries in the existing state dict
src_encoder_dict.update(pretrained_dict) 
# load the new state dict
src_encoder.load_state_dict(src_encoder_dict)
optimizer = optim.SGD(
    list(src_encoder.parameters()) + list(src_classifier.parameters()),
    lr=lr,
    momentum=momentum,
    weight_decay=weight_decay)

criterion = nn.CrossEntropyLoss()
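
# A minimal sketch of the source-only pretraining loop this setup points to (the loop
# body, num_epochs and log_interval are assumptions, not from the original source):
# plain supervised cross-entropy on the amazon domain.  Ideally the modules are moved
# to the GPU before the optimizer above is built; with the default in-place .cuda()
# conversion the existing optimizer still references the same parameters.
if cuda:
    src_encoder = src_encoder.cuda()
    src_classifier = src_classifier.cuda()
    criterion = criterion.cuda()

num_epochs = 100    # placeholder
log_interval = 10   # placeholder

src_encoder.train()
src_classifier.train()
for epoch in range(num_epochs):
    for images, labels in src_train_loader:
        if cuda:
            images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = criterion(src_classifier(src_encoder(images)), labels)
        loss.backward()
        optimizer.step()
    if (epoch + 1) % log_interval == 0:
        print('epoch {}  loss {:.4f}'.format(epoch + 1, loss.item()))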