# NOTE(review): this excerpt was whitespace-mangled; the formatting below is a
# reconstruction (tokens unchanged). It appears to contain two distinct script
# sections — an Omniglot/VGG evaluation loop, then a CIFAR-10 ResNet/DTC
# pipeline — and the indentation relationship between them cannot be recovered
# from the excerpt alone; confirm against the original file(s).

# Tail of a mapping whose name and opening brace lie before this excerpt.
# Values are presumably the per-alphabet class/cluster counts — TODO confirm.
'Mongolian': 33, 'Aurek': 34, 'Angelic': 23, 'ULOG': 33, 'Oriya': 33,
'Avesta': 31, 'Tibetan': 43, 'Tengwar': 28, 'Keble': 25, 'Ge_ez': 31,
'Glagolitic': 46 }

# Build the initial feature extractor: a VGG backbone with pretrained weights
# loaded non-strictly (missing/unexpected keys tolerated); the final layer is
# replaced with Identity so the model emits features rather than class logits.
model = VGG(n_layer='4+2', in_channels=1).to(device)
model.load_state_dict(torch.load(args.pretrain_dir), strict=False)
model.last = Identity()
init_feat_extractor = model

# Per-alphabet metric accumulators (accuracy / NMI / ARI), keyed per alphabet.
acc = {}
nmi = {}
ari = {}

for key, alphabetStr in omniglot_evaluation_alphabets_mapping.items():
    # Output layout: <exp_root>/<script_basename>/<subfolder_name>/
    runner_name = os.path.basename(__file__).split(".")[0]
    model_dir = args.exp_root + '{}/{}'.format(runner_name, args.subfolder_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # One checkpoint per alphabet.
    args.model_dir = model_dir + '/' + 'vgg6_{}.pth'.format(alphabetStr)
    args.save_txt_path = args.exp_root + '{}/{}'.format(
        runner_name, args.save_txt_name)
    # NOTE(review): background=False presumably selects the Omniglot
    # evaluation split — confirm against omniglot_alphabet_func.
    train_Dloader, eval_Dloader = omniglot_alphabet_func(
        alphabet=alphabetStr, background=False,
        num_workers=2)

# ---- second section (CIFAR-10 / DTC). Indentation relative to the loop
# above is assumed to be top-level — TODO confirm against the original file.

# CIFAR-10 *train* split used as the evaluation loader (no shuffling so
# predictions stay aligned with labels across passes).
testset = CIFAR10(root=args.dataset_root, train=True, download=True,
                  transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128,
                                         shuffle=False, num_workers=1)

# Load pretrained weights, deleting the classifier head entries so the
# non-strict load does not clash with the new n_clusters-sized head.
state_dict_imagenet = torch.load(args.pretrain_dir)
del state_dict_imagenet['linear.0.weight']
del state_dict_imagenet['linear.0.bias']

# Pass 1: run k-means on frozen backbone features (linear head replaced by
# Identity) to obtain initial metrics, cluster centers, and soft assignments;
# the assignments seed the DTC target distribution.
# NOTE(review): `trainloader` (and `trainloader_twice` below) are not defined
# in this excerpt — presumably created earlier in the file; verify.
model = ResNet(BasicBlock, [2, 2, 2, 2], args.n_clusters).to(device)
model.load_state_dict(state_dict_imagenet, strict=False)
model.linear = Identity()
init_acc, init_nmi, init_ari, init_centers, init_probs = init_prob_kmeans(
    model, trainloader, args)
args.p_targets = target_distribution(init_probs)

# Pass 2: rebuild the model and attach a learnable cluster-center parameter
# initialised from the k-means centers found above.
model = ResNet(BasicBlock, [2, 2, 2, 2], args.n_clusters).to(device)
model.load_state_dict(state_dict_imagenet, strict=False)
model.center = Parameter(torch.Tensor(args.n_clusters, args.n_clusters))
model.center.data = torch.tensor(init_centers).float().to(device)

# Warm-up training, then dispatch to the selected DTC variant.
warmup_train(model, trainloader, testloader, args)
if args.DTC == 'Baseline':
    Baseline_train(model, trainloader, testloader, args)
elif args.DTC == 'PI':
    # NOTE(review): PI consistency presumably needs a loader yielding two
    # augmented views per sample — confirm where trainloader_twice is built.
    PI_train(model, trainloader_twice, testloader, args)
elif args.DTC == 'TE':
    # (excerpt truncated here: the 'TE' branch body continues past this chunk)