# --- Office-31 domain-adaptation setup: Amazon -> Webcam ("a2w") ---
# NOTE(review): `data_dir` and `batch_size` are used below but never defined in
# this chunk -- presumably defined earlier in the file; verify.
src_dir = 'amazon'
tgt_train_dir = 'dslr_tgt'
tgt_dir = 'webcam'
test_dir = 'test'
cuda = torch.cuda.is_available()
# Evaluation loader over the target domain (webcam); train=False disables
# shuffling/augmentation in get_dataloader.
test_loader = get_dataloader(data_dir, tgt_dir, batch_size=15, train=False)
# lam: weight of the domain-confusion loss term
lam = 0.01
# nu: weight of the soft-label loss term
nu = 0.1

# Load the source-pretrained and fine-tuned encoder/classifier checkpoints.

encoder = Encoder()
cl_classifier = ClassClassifier(num_classes=31)  # Office-31 has 31 classes
# NOTE(review): dm_classifier is built fresh (no checkpoint loaded below) --
# presumably trained from scratch during adaptation; verify.
dm_classifier = DomainClassifier()

encoder.load_state_dict(torch.load('./checkpoints/a2w/src_encoder_final.pth'))
cl_classifier.load_state_dict(
    torch.load('./checkpoints/a2w/src_classifier_final.pth'))

# Training loaders: source = amazon, target = dslr_tgt.
src_train_loader = get_dataloader(data_dir, src_dir, batch_size, train=True)
tgt_train_loader = get_dataloader(data_dir,
                                  tgt_train_dir,
                                  batch_size,
                                  train=True)
criterion = nn.CrossEntropyLoss()
# criterion_kl = nn.KLDivLoss()
if cuda:
    criterion = criterion.cuda()
    # NOTE(review): only cl_classifier is moved to the GPU here; `encoder` and
    # `dm_classifier` are not -- confirm they are moved elsewhere.
    cl_classifier = cl_classifier.cuda()
# ---- 示例#2 (Example #2): separate script pasted below ----
    ])

# DANN-style adversarial domain adaptation: one ImageFolder per domain.
source_dataset = ImageFolder(args.source, transform=transform_source)
target_dataset = ImageFolder(args.target, transform=transform_target)

source_loader = DataLoader(source_dataset,
                           batch_size=args.batch_size,
                           shuffle=True)
target_loader = DataLoader(target_dataset,
                           batch_size=args.batch_size,
                           shuffle=True)

# models
# F: shared feature extractor, C: label predictor, D: domain discriminator.
# NOTE(review): single-letter module-level names (F especially, which commonly
# aliases torch.nn.functional) are easy to shadow -- kept as-is since later
# code outside this chunk references them.
F = FeatureExtractor(resnet=args.resnet_type).to(device)
C = LabelPredictor(resnet=args.resnet_type).to(device)
D = DomainClassifier(resnet=args.resnet_type).to(device)

class_criterion = nn.CrossEntropyLoss()
# Domain labels are binary (source vs. target), hence BCE on raw logits.
domain_criterion = nn.BCEWithLogitsLoss()

opt_F = optim.AdamW(F.parameters())
opt_C = optim.AdamW(C.parameters())
opt_D = optim.AdamW(D.parameters())

# train
F.train()
D.train()
C.train()
# lamb: adversarial-loss weight; now/tot track progress over all training steps
# (len(source_loader) batches x n_epoch epochs) so that lamb/p can be annealed.
# NOTE(review): the annealing update itself (presumably the DANN schedule using
# gamma) happens outside this chunk; verify.
lamb, p, gamma, now, tot = 0, 0, 10, 0, len(source_loader) * args.n_epoch
if not args.adaptive_lamb:
    lamb = 0.1  # fixed adversarial weight when the adaptive schedule is off
# ---- 示例#3 (Example #3): separate script pasted below ----
    # Convert raw review text into integer token sequences for the BERT encoder.
    src_X_train = review2seq(src_X_train)
    src_X_test = review2seq(src_X_test)
    tgt_X = review2seq(tgt_X)

    # load dataset
    # Source domain gets separate train/eval loaders; target domain gets one.
    src_data_loader = get_data_loader(src_X_train, src_Y_train,
                                      args.batch_size, args.seqlen)
    src_data_loader_eval = get_data_loader(src_X_test, src_Y_test,
                                           args.batch_size, args.seqlen)
    tgt_data_loader = get_data_loader(tgt_X, tgt_Y, args.batch_size,
                                      args.seqlen)

    # load models
    # Shared BERT encoder, a sentiment/class head, and a domain discriminator.
    encoder = BERTEncoder()
    cls_classifier = BERTClassifier()
    dom_classifier = DomainClassifier()
    # Wrap the models for multi-GPU training.
    # BUG FIX: the original assigned the wrapped classifiers to *new* names
    # (`class_classifier`, `domain_encoder`), silently discarding their
    # DataParallel wrappers. Rebind the existing names instead -- as was
    # already done for `encoder` -- so the wrapped models are what
    # init_model() restores below, matching the later
    # `encoder.module.encoder...` access that relies on this wrapping.
    if torch.cuda.device_count() > 1:
        encoder = torch.nn.DataParallel(encoder)
        cls_classifier = torch.nn.DataParallel(cls_classifier)
        dom_classifier = torch.nn.DataParallel(dom_classifier)

    encoder = init_model(encoder, restore=param.encoder_restore)
    cls_classifier = init_model(cls_classifier,
                                restore=param.cls_classifier_restore)
    dom_classifier = init_model(dom_classifier,
                                restore=param.dom_classifier_restore)

    # freeze encoder params
    if torch.cuda.device_count() > 1:
        for params in encoder.module.encoder.embeddings.parameters():
# ---- 示例#4 (Example #4): separate script pasted below ----
# Hyperparameters for the LSTM encoder + domain classifier (adversarial
# transfer setup, exported from a notebook -- the "# In[ ]:" markers below are
# cell boundaries).
LRS = [1e-3, 3e-4]  # Taken from paper
LR = 1e-3
LAMBDA = 1e-3  # weight of the domain-adversarial term passed to train_model
SAVE_DIR = 'domain_saved_models'

# In[ ]:

DROPOUT = 0.1
BIDIRECTIONAL = False
ENCODING_LENGTH = 240  # dimensionality of the sentence encoding
encoder_model = LSTM(embeddings, padding_idx, ENCODING_LENGTH, 1,
                     TRUNCATE_LENGTH, DROPOUT, BIDIRECTIONAL)

# Domain classifier: maps an encoding through two hidden layers (300 -> 150).
HIDDEN_DIM_1 = 300
HIDDEN_DIM_2 = 150
domain_model = DomainClassifier(ENCODING_LENGTH, HIDDEN_DIM_1, HIDDEN_DIM_2)

# In[ ]:

# Train on the transfer set, validate/test on the Android datasets.
# NOTE(review): BATCH_SIZE, MARGIN, NUM_EPOCHS, the datasets, `embeddings`,
# `padding_idx`, and TRUNCATE_LENGTH are all defined outside this chunk.
train_model(transfer_train_dataset,
            android_dev_dataset,
            android_test_dataset,
            encoder_model,
            domain_model,
            save_dir=SAVE_DIR,
            batch_size=BATCH_SIZE,
            margin=MARGIN,
            _lambda=LAMBDA,
            num_epochs=NUM_EPOCHS,
            lr=LR)
# --- Office-31 domain-adaptation setup: Amazon -> Webcam ("a2w") ---
# NOTE(review): this chunk is a byte-for-byte duplicate of the chunk at the
# very top of this file (likely a paste artifact) -- confirm whether one copy
# should be removed.
# NOTE(review): `data_dir` and `batch_size` are used below but never defined in
# this chunk; verify they come from earlier in the original script.
src_dir = 'amazon'
tgt_train_dir = 'dslr_tgt'
tgt_dir = 'webcam'
test_dir = 'test'
cuda = torch.cuda.is_available()
# Evaluation loader over the target domain (webcam); train=False disables
# shuffling/augmentation in get_dataloader.
test_loader = get_dataloader(data_dir, tgt_dir, batch_size=15, train=False)
# lam: weight of the domain-confusion loss term
lam = 0.01
# nu: weight of the soft-label loss term
nu = 0.1

# Load the source-pretrained and fine-tuned encoder/classifier checkpoints.

encoder = Encoder()
cl_classifier = ClassClassifier(num_classes=31)  # Office-31 has 31 classes
# NOTE(review): dm_classifier is built fresh (no checkpoint loaded below).
dm_classifier = DomainClassifier()

encoder.load_state_dict(torch.load('./checkpoints/a2w/src_encoder_final.pth'))
cl_classifier.load_state_dict(
    torch.load('./checkpoints/a2w/src_classifier_final.pth'))

# Training loaders: source = amazon, target = dslr_tgt.
src_train_loader = get_dataloader(data_dir, src_dir, batch_size, train=True)
tgt_train_loader = get_dataloader(data_dir,
                                  tgt_train_dir,
                                  batch_size,
                                  train=True)
criterion = nn.CrossEntropyLoss()
# criterion_kl = nn.KLDivLoss()
if cuda:
    criterion = criterion.cuda()
    # NOTE(review): only cl_classifier is moved to the GPU here; `encoder` and
    # `dm_classifier` are not -- confirm they are moved elsewhere.
    cl_classifier = cl_classifier.cuda()