result = open(os.path.join(args.result, "Office31_HAFN_" + args.task + '_' +
                           args.post + '.' + args.repeat + "_score.txt"), "a")

t_root = os.path.join(args.data_root, args.target, "images")
t_label = os.path.join(args.data_root, args.target, "label.txt")

data_transform = transforms.Compose([
    transforms.Resize((256, 256)),  # transforms.Scale was deprecated and removed; Resize is its drop-in replacement
    transforms.CenterCrop((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
t_set = OfficeImage(t_root, t_label, data_transform)
# assert len(t_set) == 795

t_loader = torch.utils.data.DataLoader(t_set, batch_size=args.batch_size,
                                       shuffle=args.shuffle, num_workers=args.num_workers)

netG = ResBase50().cuda()
netF = ResClassifier(class_num=args.class_num, extract=False, dropout_p=args.dropout_p).cuda()
netG.eval()
netF.eval()

for epoch in range(args.epoch // 2, args.epoch + 1):  # integer division: range() requires ints in Python 3
    if epoch % 10 != 0:
        continue  # skip epochs that are not multiples of 10; the rest of the loop body is not shown here
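# Hedged sketch (assumption, not the original code): one plausible body for the
# truncated evaluation loop above. The checkpoint names ("netG_<epoch>.pth",
# "netF_<epoch>.pth"), the args.snapshot directory, and the assumption that
# netF(netG(x)) returns class logits are all hypothetical.
def evaluate_epoch(epoch):
    netG.load_state_dict(torch.load(os.path.join(args.snapshot, "netG_%d.pth" % epoch)))
    netF.load_state_dict(torch.load(os.path.join(args.snapshot, "netF_%d.pth" % epoch)))
    correct, total = 0, 0
    with torch.no_grad():
        for imgs, labels in t_loader:
            imgs, labels = imgs.cuda(), labels.cuda()
            preds = netF(netG(imgs)).max(dim=1)[1]  # argmax over class logits
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    acc = float(correct) / total
    result.write("epoch: %d accuracy: %.4f\n" % (epoch, acc))
    result.flush()
    return acc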
# batch_size, shuffle and num_workers are used below; the assignments that
# introduced them fall outside this fragment, so they are restated here.
batch_size = args.batch_size
shuffle = args.shuffle
num_workers = args.num_workers
s2_weight = args.s2_weight
lr = args.lr
beta1 = args.beta1
beta2 = args.beta2
gpu_id = args.gpu_id
num_classes = args.num_classes
threshold = args.threshold
log_interval = args.log_interval
cls_epoches = args.cls_epoches
gan_epoches = args.gan_epoches

s1_label = "/home/silvia/MSDA/A_W_2_D_Open/bvlc_A_W_2_D/data/amazon.txt"
s2_label = "/home/silvia/MSDA/A_W_2_D_Open/bvlc_A_W_2_D/data/webcam.txt"
t_label = "/home/silvia/MSDA/A_W_2_D_Open/bvlc_A_W_2_D/data/dslr.txt"

s1_set = OfficeImage(s1_label, split="train")
s2_set = OfficeImage(s2_label, split="train")
t_set = OfficeImage(t_label, split="train")
t_set_test = OfficeImage(t_label, split="test")
assert len(s1_set) == 2817
assert len(s2_set) == 795
assert len(t_set) == 498
assert len(t_set_test) == 498

s1_loader_raw = torch.utils.data.DataLoader(s1_set, batch_size=batch_size,
                                            shuffle=shuffle, num_workers=num_workers)
s2_loader_raw = torch.utils.data.DataLoader(s2_set, batch_size=batch_size,
                                            shuffle=shuffle, num_workers=num_workers)
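# Hedged sketch (assumption): the args fields consumed above imply an argparse
# interface roughly like the one below. Every default value is illustrative,
# not taken from the original repository.
import argparse

def build_argparser():
    parser = argparse.ArgumentParser(description="A+W -> D multi-source adaptation")
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--shuffle", type=lambda s: s.lower() == "true", default=True)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--s2_weight", type=float, default=1.0)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--beta1", type=float, default=0.9)
    parser.add_argument("--beta2", type=float, default=0.999)
    parser.add_argument("--gpu_id", type=str, default="0")
    parser.add_argument("--num_classes", type=int, default=31)
    parser.add_argument("--threshold", type=float, default=0.9)
    parser.add_argument("--log_interval", type=int, default=10)
    parser.add_argument("--cls_epoches", type=int, default=10)
    parser.add_argument("--gan_epoches", type=int, default=5)
    return parser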
log_file = osp.join(log_dir,
                    os.path.abspath(__file__).split('/')[-1].split('.')[0] + '.txt')
if os.path.isfile(log_file):
    os.remove(log_file)  # start from a fresh log on every run
logger = get_log(log_file)

s1_root = os.path.join(data_root, args.s1)
s1_label = os.path.join(data_root, args.s1 + ".txt")
s2_root = os.path.join(data_root, args.s2)
s2_label = os.path.join(data_root, args.s2 + ".txt")
s3_root = os.path.join(data_root, args.s3)
s3_label = os.path.join(data_root, args.s3 + ".txt")
t_root = os.path.join(data_root, args.t)
t_label = os.path.join(data_root, args.t + ".txt")

s1_set = OfficeImage(s1_root, s1_label, split="train")
s2_set = OfficeImage(s2_root, s2_label, split="train")
s3_set = OfficeImage(s3_root, s3_label, split="train")
t_set = OfficeImage(t_root, t_label, split="train")
t_set_test = OfficeImage(t_root, t_label, split="test")
assert len(s1_set) == 2427
assert len(s2_set) == 4439
assert len(s3_set) == 4357
assert len(t_set) == 4365
assert len(t_set_test) == 4365

s1_loader_raw = torch.utils.data.DataLoader(s1_set, batch_size=batch_size,
                                            shuffle=shuffle, num_workers=num_workers,
                                            drop_last=True)
s2_loader_raw = torch.utils.data.DataLoader(s2_set, batch_size=batch_size,
                                            shuffle=shuffle, num_workers=num_workers,
                                            drop_last=True)
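# Hedged sketch (assumption): get_log is defined elsewhere in the repository.
# A minimal implementation consistent with its use above builds a logger that
# writes to both the given file and stdout via the standard logging module.
import logging
import sys

def get_log(log_file):
    logger = logging.getLogger(log_file)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter("%(asctime)s %(message)s")
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(fmt)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger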
print_args(args)

source_root = os.path.join(args.data_root, args.source, "images")
source_label = os.path.join(args.data_root, args.source, "label.txt")
target_root = os.path.join(args.data_root, args.target, "images")
target_label = os.path.join(args.data_root, args.target, "label.txt")

train_transform = transforms.Compose([
    transforms.Resize((256, 256)),  # transforms.Scale was deprecated and removed; Resize is its drop-in replacement
    transforms.RandomCrop((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

source_set = OfficeImage(source_root, source_label, train_transform)
target_set = OfficeImage(target_root, target_label, train_transform)
source_loader = torch.utils.data.DataLoader(source_set, batch_size=args.batch_size,
                                            shuffle=args.shuffle, num_workers=args.num_workers)
target_loader = torch.utils.data.DataLoader(target_set, batch_size=args.batch_size,
                                            shuffle=args.shuffle, num_workers=args.num_workers)

netG = ResBase50().cuda()
netF = ResClassifier(class_num=args.class_num, extract=args.extract,
                     dropout_p=args.dropout_p).cuda()
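# Hedged sketch (assumption): ResBase50 / ResClassifier are defined elsewhere
# in the repository. A minimal version consistent with their use above wraps a
# torchvision ResNet-50 backbone and a dropout classifier head; the layer
# sizes (e.g. the 1000-unit bottleneck) and the behavior of the extract flag
# are illustrative, not the authors' exact design.
import torch.nn as nn
from torchvision import models

class ResBase50(nn.Module):
    def __init__(self):
        super(ResBase50, self).__init__()
        resnet = models.resnet50(pretrained=True)
        # keep everything up to the global average pool; drop the 1000-way fc
        self.features = nn.Sequential(*list(resnet.children())[:-1])

    def forward(self, x):
        x = self.features(x)
        return x.view(x.size(0), -1)  # (N, 2048) feature vectors

class ResClassifier(nn.Module):
    def __init__(self, class_num=31, extract=False, dropout_p=0.5):
        super(ResClassifier, self).__init__()
        self.extract = extract
        self.bottleneck = nn.Sequential(
            nn.Linear(2048, 1000),
            nn.BatchNorm1d(1000),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout_p),
        )
        self.fc = nn.Linear(1000, class_num)

    def forward(self, x):
        feat = self.bottleneck(x)
        logits = self.fc(feat)
        if self.extract:
            return feat, logits  # assumed behavior of the extract flag
        return logits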