def run():
    # load source dataset
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_eval = get_data_loader(params.src_dataset, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)

    # pre-train source model
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    im, _ = next(iter(src_data_loader))
    summary(src_encoder, input_size=im[0].size())
    print(">>> Source Classifier <<<")
    print(src_classifier)

    if not (src_encoder.restored and src_classifier.restored and
            params.src_model_trained):
        src_encoder, src_classifier = train_src(
            src_encoder, src_classifier, src_data_loader)

    # eval source model
    print("=== Evaluating classifier for source domain ===")
    eval_src(src_encoder, src_classifier, src_data_loader_eval)
def office():
    init_random_seed(params.manual_seed)

    # load dataset
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_eval = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train source model
    if not (src_encoder.restored and src_classifier.restored and
            params.src_model_trained):
        src_encoder, src_classifier = train_src(
            src_encoder, src_classifier, src_data_loader)

    # eval source model
    # print("=== Evaluating classifier for source domain ===")
    # eval_src(src_encoder, src_classifier, src_data_loader_eval)

    # train target encoder by GAN
    # init weights of target encoder with those of source encoder
    if not tgt_encoder.restored:
        tgt_encoder.load_state_dict(src_encoder.state_dict())

    if not (tgt_encoder.restored and critic.restored and
            params.tgt_model_trained):
        tgt_encoder = train_tgt(src_encoder, tgt_encoder, critic,
                                src_data_loader, tgt_data_loader)

    # eval target encoder on test set of target dataset
    print(">>> domain adaptation <<<")
    acc = eval_tgt(tgt_encoder, src_classifier, tgt_data_loader_eval)
    return acc
def run():
    # load dataset
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)

    # evaluate target encoder on test set of target dataset
    print("=== Evaluating classifier for encoded target domain ===")
    print(">>> source only <<<")
    eval_tgt(src_encoder, src_classifier, tgt_data_loader_eval)
    print(">>> domain adaptation <<<")
    eval_tgt(tgt_encoder, src_classifier, tgt_data_loader_eval)
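# `eval_tgt` is used throughout these scripts but not defined in this section.
# A minimal sketch of what it presumably computes, assuming the classifier
# maps encoder features to class logits and the loader yields (images, labels)
# batches already on the right device; the body below is an assumption, not
# the repo's actual implementation (hence the `_sketch` suffix):
def eval_tgt_sketch(encoder, classifier, data_loader):
    encoder.eval()
    classifier.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in data_loader:
            preds = classifier(encoder(images))
            correct += (preds.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    acc = correct / total
    print("Avg Accuracy = {:.2%}".format(acc))
    return acc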
def experiments(exp):
    # print(exp, case, affine, num_epochs)
    # init random seed
    # params.d_learning_rate = lr_d
    # params.c_learning_rate = lr_c
    init_random_seed(params.manual_seed)

    # load dataset
    src_dataset, tgt_dataset = exp.split('_')
    src_data_loader = get_data_loader(src_dataset)
    src_data_loader_eval = get_data_loader(src_dataset, train=False)
    tgt_data_loader = get_data_loader(tgt_dataset)
    tgt_data_loader_eval = get_data_loader(tgt_dataset, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore, exp=exp)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore, exp=exp)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore, exp=exp)
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        exp=exp, restore=params.d_model_restore)

    # train source model
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    print(src_encoder)
    print(">>> Source Classifier <<<")
    print(src_classifier)

    if not (src_encoder.restored and src_classifier.restored and
            params.src_model_trained):
        src_encoder, src_classifier = train_src(exp, src_encoder, src_classifier,
                                                src_data_loader, src_data_loader_eval)

    # eval source model
    print("=== Evaluating classifier for source domain ===")
    evaluation(src_encoder, src_classifier, src_data_loader_eval)

    # train target encoder by GAN
    print("=== Training encoder for target domain ===")
    print(">>> Target Encoder <<<")
    print(tgt_encoder)
    print(">>> Critic <<<")
    print(critic)

    # init weights of target encoder with those of source encoder
    if not tgt_encoder.restored:
        tgt_encoder.load_state_dict(src_encoder.state_dict())

    if not (tgt_encoder.restored and critic.restored and
            params.tgt_model_trained):
        tgt_encoder = train_tgt(exp, src_encoder, tgt_encoder, critic,
                                src_classifier, src_data_loader, tgt_data_loader,
                                tgt_data_loader_eval)

    # eval target encoder on test set of target dataset
    print("=== Evaluating classifier for encoded target domain ===")
    print(">>> source only <<<")
    evaluation(src_encoder, src_classifier, tgt_data_loader_eval)
    print(">>> domain adaptation <<<")
    evaluation(tgt_encoder, src_classifier, tgt_data_loader_eval)
# assumed repo-local imports for the other names used below (the module
# paths for params and the model classes are an assumption)
import params
from models import Discriminator, LeNetClassifier, LeNetEncoder
from utils import get_data_loader, init_model, init_random_seed

if __name__ == '__main__':
    # init random seed
    init_random_seed(params.manual_seed)

    # load dataset
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_eval = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(params.tgt_dataset, train=False)

    # load models
    src_encoder = init_model(net=LeNetEncoder(),
                             restore=params.src_encoder_restore)
    src_classifier = init_model(net=LeNetClassifier(),
                                restore=params.src_classifier_restore)
    tgt_encoder = init_model(net=LeNetEncoder(),
                             restore=params.tgt_encoder_restore)
    critic = init_model(Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train source model
    print("=== Training classifier for source domain ===")
    print(">>> Source Encoder <<<")
    print(src_encoder)
    print(">>> Source Classifier <<<")
    print(src_classifier)
# load dataset SM
# src_data_loader = get_data_loader(cfg.src_dataset)
# src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
# tgt_data_loader = get_data_loader(cfg.tgt_dataset)
# tgt_data_loader_eval = get_data_loader(cfg.tgt_dataset, train=False)

# load dataset UM MU
src_data_loader = get_data_loader(cfg.src_dataset, sample=True)
src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
tgt_data_loader = get_data_loader(cfg.tgt_dataset, sample=True)
tgt_data_loader_eval = tgt_data_loader

# load models
src_encoder = init_model(net=LeNetEncoder(cfg.inputc),
                         restore=cfg.src_encoder_restore)
src_classifier = init_model(net=LeNetClassifier(ncls=cfg.ncls),
                            restore=cfg.src_classifier_restore)
tgt_classifier = init_model(net=LeNetClassifier(ncls=cfg.ncls),
                            restore=cfg.src_classifier_restore)
tgt_encoder = init_model(net=LeNetEncoder(cfg.inputc),
                         restore=cfg.tgt_encoder_restore)
critic = init_model(Discriminator_feat(input_dims=cfg.d_input_dims,
                                       hidden_dims=cfg.d_hidden_dims,
                                       output_dims=cfg.d_output_dims),
                    restore=cfg.d_model_restore)
generator = init_model(net=LeNetGenerator(input_dims=cfg.g_input_dims,
                                          outputc=cfg.inputc),
                       restore=cfg.src_generator_restore)
# generator = init_model(net=LeNetGenerator(input_dims=cfg.g_input_dims, outputc=cfg.inputc),
#                        restore=None)
discriminator = init_model(net=Discriminator_img(nc=cfg.inputc),
val_src_dataloader = DataLoader(val_src_dataset, batch_size=batch_size, shuffle=False)
val_tgt_dataloader = DataLoader(val_tgt_dataset, batch_size=batch_size, shuffle=False)

# domain labels used by the discriminator
src_label = 0
tgt_label = 1

print(len(src_dataset), len(tgt_dataset), len(val_src_dataset), len(val_tgt_dataset))

# In[4]:

# init models
src_encoder = LeNetEncoder()
tgt_encoder = LeNetEncoder()
src_classifier = LeNetClassifier()
discriminator = Discriminator()

# init weights
src_encoder.apply(init_weights)
tgt_encoder.apply(init_weights)
src_classifier.apply(init_weights)
discriminator.apply(init_weights)

# to device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
src_encoder.to(device)
tgt_encoder.to(device)
src_classifier.to(device)
discriminator.to(device)
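# The cell above defines the domain labels and models but not the adversarial
# update itself. A minimal sketch of one ADDA step, assuming the encoders
# return flat feature vectors and the discriminator outputs two logits;
# `criterion`, the two optimizers, and the `src_images`/`tgt_images` batch
# variables are assumptions, not code from this notebook.
import torch.nn as nn
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
d_optimizer = optim.Adam(discriminator.parameters(), lr=1e-4)
tgt_optimizer = optim.Adam(tgt_encoder.parameters(), lr=1e-4)

def adda_step(src_images, tgt_images):
    # 1) train the discriminator to separate source from target features
    d_optimizer.zero_grad()
    src_feat = src_encoder(src_images).detach()  # source encoder stays frozen
    tgt_feat = tgt_encoder(tgt_images).detach()
    feats = torch.cat([src_feat, tgt_feat], dim=0)
    domains = torch.cat([
        torch.full((src_feat.size(0),), src_label, dtype=torch.long, device=device),
        torch.full((tgt_feat.size(0),), tgt_label, dtype=torch.long, device=device),
    ])
    d_loss = criterion(discriminator(feats), domains)
    d_loss.backward()
    d_optimizer.step()

    # 2) train the target encoder to fool the discriminator by labeling
    #    target features as "source"
    tgt_optimizer.zero_grad()
    tgt_feat = tgt_encoder(tgt_images)
    fool = torch.full((tgt_feat.size(0),), src_label, dtype=torch.long, device=device)
    g_loss = criterion(discriminator(tgt_feat), fool)
    g_loss.backward()
    tgt_optimizer.step()
    return d_loss.item(), g_loss.item()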
dataloader_target_test = torch.utils.data.DataLoader(
    dataset=data_test.DATA(opt),
    batch_size=opt.batch_size,
    shuffle=False,
    num_workers=opt.n_cpu
)

# ------------
# Load Model
# ------------
print("---> preparing models...")
target_encoder = LeNetEncoder()
source_classifier = LeNetClassifier()

# Select models that were trained on the correct dataset
if opt.target == "mnistm":
    target_encoder_name = "ADDA-target-encoder-SVHN_MNISTM.pt"
    source_classifier_name = "ADDA-source-classifier-SVHN_MNISTM.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)
    source_classifier_pth = os.path.join(opt.save_dir, source_classifier_name)
elif opt.target == "svhn":
    target_encoder_name = "ADDA-target-encoder-MNISTM_SVHN.pt"
    source_classifier_name = "ADDA-source-classifier-MNISTM_SVHN.pt"
    target_encoder_pth = os.path.join(opt.save_dir, target_encoder_name)
    source_classifier_pth = os.path.join(opt.save_dir, source_classifier_name)
else:
source_train_loader, source_test_loader = utils.load_data("MNIST")
target_train_loader, target_test_loader = utils.load_data("USPS")
batch_size = 256

# In[3]:

images, labels = next(iter(target_test_loader))
images.size()

# In[4]:

DEVICE = 'cuda:1'

# load models
src_encoder = LeNetEncoder().to(DEVICE)
src_classifier = LeNetClassifier().to(DEVICE)
tgt_encoder = LeNetEncoder().to(DEVICE)
discriminator = Discriminator(input_dims=500, hidden_dims=500, output_dims=2).to(DEVICE)

# In[5]:

# print models for source
print(src_encoder)
print(src_classifier)

# In[6]:

try:
    src_encoder.load_state_dict(torch.load('src_encoder.pth'))
def main(args):
    # read from args
    test_path = args.test_path
    d_target = args.d_target
    output_predict_path = args.output_predict_path

    ########## Arguments ##########
    batch_size = 128

    # svhn, usps, mnistm
    if d_target == "mnistm":
        d_source = "usps"
    elif d_target == "svhn":
        d_source = "mnistm"
    else:
        d_source = "svhn"

    output_src_classifier_path = "./hw3-4/models/src_classifier_{}_{}.pth".format(
        d_source, d_target)
    output_tgt_encoder_path = "./hw3-4/models/tgt_encoder_{}_{}.pth".format(
        d_source, d_target)
    ###############################

    dataset = ReproduceDataset(test_path)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # init models
    tgt_encoder = LeNetEncoder()
    src_classifier = LeNetClassifier()

    # to device
    tgt_encoder.to(device)
    src_classifier.to(device)

    # load trained weights
    tgt_encoder.load_state_dict(torch.load(
        output_tgt_encoder_path, map_location=device))
    src_classifier.load_state_dict(torch.load(
        output_src_classifier_path, map_location=device))
    tgt_encoder.eval()
    src_classifier.eval()

    all_pred = []
    for idx, targets in enumerate(dataloader):
        target_images = targets.to(device)
        with torch.no_grad():
            preds = src_classifier(tgt_encoder(target_images))
        # take the argmax as the predicted label
        _, pred_labels = torch.max(preds, 1)
        all_pred.append(pred_labels)

    # save predictions to a CSV file
    pred = torch.cat(all_pred).cpu().numpy()
    image_names = ['{:05}.png'.format(i) for i in range(len(pred))]
    pd.DataFrame({
        'image_name': image_names,
        'label': pred
    }).to_csv(output_predict_path, index=False)
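# A minimal sketch of an entry point for main(); the flag names mirror the
# attributes read at the top of the function, but the original script's exact
# CLI is an assumption.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_path', help='directory of target-domain test images')
    parser.add_argument('--d_target', choices=['mnistm', 'svhn', 'usps'],
                        help='target domain; the source domain is derived from it')
    parser.add_argument('--output_predict_path', help='output CSV of predicted labels')
    main(parser.parse_args())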