# --- Data loaders ---------------------------------------------------------
# Training data is shuffled each epoch; test/val order is kept fixed so that
# evaluation results are reproducible across runs.
train_loader = torch.utils.data.DataLoader(dataset=bp_train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=bp_test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

val_loader = torch.utils.data.DataLoader(dataset=bp_val_dataset,
                                         batch_size=batch_size,
                                         shuffle=False)


# --- Model / objective ----------------------------------------------------
model = vgg19_bn().to(device)
# Best-loss sentinel: use +inf rather than an arbitrary constant (was 1000)
# so the first observed loss is always recorded even if it exceeds 1000.
min_loss = float("inf")
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)

# NOTE(review): the inner batch loop below has no executable body (only
# commented-out lines), so this fragment as pasted raises IndentationError —
# it looks like two unrelated snippets were concatenated mid-loop. Code kept
# byte-identical; comments only.
for epoch in range(num_epochs):
    for i,(data,reg,label) in enumerate(train_loader):
        #print(i)
        #print(data)
        # data = data.to(device)
        # label = label.to(device)
    # Derive the parent class name from the dataset path: the second-to-last
    # path component of `basepath` (presumably .../<clss>/<split> — confirm).
    clss = basepath.rstrip().split(os.path.sep)[-2]
    
    # One binary "concept" classifier per sub-directory of <basepath>/train.
    classes = os.listdir(os.path.join(basepath, "train"))
    concept_classifiers = {x:None for x in classes}

    data_params = {'path': basepath, 'batch_size': 128}
    train_loader, validation_loader = utils.make_folder_loaders(basepath)

    for concept_class in classes:
        # Fine-tuning hyper-parameters for this concept's binary classifier;
        # checkpoints are named "<clss>_<concept_class>".
        train_params = {'description':  clss + "_" + concept_class,
                        'num_epochs': 20, 'check_point': 5,
                        'train_loader': train_loader,
                        'validation_loader': validation_loader}

        # 4096 presumably matches the stock VGG classifier width; any other
        # value selects the reduced-latent variant — TODO confirm in utils.
        if num_latent == 4096:
            concept_classifiers[concept_class] = utils.finetune_into_binary(vgg_model.vgg19_bn(pretrained=True))
        else:
            concept_classifiers[concept_class] = utils.finetune_into_binary_with_features(vgg_model.vgg19_bn(pretrained=True), num_latent=num_latent)
        # One-vs-rest label mapping: 1 for the current concept's class index,
        # 0 for every other class index.
        concept_mapping = {i:0 for i in range(len(classes))}
        concept_mapping[train_loader.dataset.class_to_idx[concept_class]] = 1

        if mode == 'train':
            finetune_model(concept_classifiers[concept_class], train_params, concept_mapping)
        else:
            # Load previously trained weights from "<description>.pt" under
            # the models directory given as the third CLI argument.
            models_path = sys.argv[3]
            concept_classifiers[concept_class].load_state_dict(ch.load(os.path.join(models_path, train_params['description'] + ".pt")))
            concept_classifiers[concept_class].eval()
            # Calculate F-1 metrics
            gran = 20
            best_f1 = 0
     # NOTE(review): this fragment starts mid-loop (the enclosing iteration
     # over concept loaders is not visible) and ends mid-call at `ch.load(` —
     # it is truncated on both sides. Code kept byte-identical; comments only.
     num_concepts = len(concept_loader[0].dataset.classes)
     mappings.append(num_concepts)
     concept_loaders.append(
         (batch_cycle_repeater(concept_loader[0]), concept_loader[1]))
     # Create latent ranges: each concept class owns a contiguous slice
     # [start_index, start_index + per_class_concept_latent) of the latent
     # vector; start_index advances monotonically across classes.
     latent_range = []
     for i in range(num_concepts):
         latent_range.append(
             (start_index, start_index + per_class_concept_latent))
         start_index += per_class_concept_latent
     latent_ranges.append(latent_range)
 # Create normal CIFAR-10 loader
 cifar_loaders = utils.get_cifar_dataloaders()
 # Define model: VGG-19(BN) whose latent width is one slice per concept.
 model = vgg_model.vgg19_bn(pretrained=False,
                            num_latent=per_class_concept_latent *
                            num_total_concepts).cuda()
 if mode == "train":
     # Wrap for multi-GPU training before fitting the shared-latent model.
     model = nn.DataParallel(model)
     #  Train model with shared encoders
     nb_epochs = 200
     train_shared_latent_model(model,
                               nb_epochs,
                               cifar_loaders,
                               concept_loaders,
                               mappings,
                               latent_ranges,
                               alpha=alpha,
                               beta=beta)
 else:
     # NOTE(review): truncated here — the checkpoint-loading call is cut off
     # mid-argument list.
     checkpoint = ch.load(
	# NOTE(review): fragment begins with a dangling `else:` (its `if` is not
	# visible above) and ends with an unterminated `if` at the bottom —
	# truncated paste. Code kept byte-identical; comments only.
	else:
		latent = True
	print("Latent mode : %s" % latent)
	# Get CIFAR10 loaders
	trainloader, testloader = utils.get_cifar_dataloaders()
	X_train, Y_train = get_combined_data(trainloader)
	X_val,   Y_val   = get_combined_data(testloader)
	# Shift labels to cpu,numpy
	Y_train = Y_train.cpu().numpy()
	Y_val   = Y_val.cpu().numpy()
	# Use concept classifiers to get scores: one score vector per saved
	# classifier file found in cc_dir.
	features_train, features_test = [], []
	for index, ccpath in tqdm(enumerate(os.listdir(cc_dir))):
		print(index, os.path.join(cc_dir, ccpath))
		# Rebuild the classifier architecture before loading weights;
		# `latent` selects the reduced-feature (80-unit) variant.
		if latent:
			model = utils.finetune_into_binary_with_features(vgg_model.vgg19_bn(pretrained=True), num_latent=80)
		else:
			model = utils.finetune_into_binary(vgg_model.vgg19_bn(pretrained=True))
		# Load weights into model
		model.load_state_dict(ch.load(os.path.join(cc_dir, ccpath)))
		# Set to evaluation mode
		model.eval()
		features_train.append(get_actual_scores(X_train, model, latent))
		features_test.append(get_actual_scores(X_val, model, latent))
		# Explicitly free memory
		del model

	# Stack per-classifier outputs; transpose so rows are samples and columns
	# are classifiers (assumes each score tensor has a trailing unit dim —
	# TODO confirm against get_actual_scores).
	features_train = ch.stack(features_train).squeeze(-1).numpy().transpose()
	features_test  = ch.stack(features_test).squeeze(-1).numpy().transpose()

	if len(features_train.shape) == 3:
# --- Пример #5 (Example #5): pasted-snippet separator from the source page ---
# (the stray "0" beneath it was the page's vote counter, not code)
    # wrapped_model = utils.PyTorchWrapper(model)
    # C1, C2
    models = []
    # Meta-classifier filename encodes whether latent features were used.
    if latent:
        filename = "./meta_classifier_True"
    else:
        filename = "./meta_classifier_False"
    # Load meta-classifier
    # NOTE(review): pickle.load can execute arbitrary code — only safe if
    # this file is trusted.
    clf = pickle.load(open(filename, 'rb'))

    print("[Concept Classifiers] Loading")
    gpu_devices = ['cuda:0', 'cuda:1', 'cuda:2', 'cuda:3']
    for i, ccpath in tqdm(enumerate(os.listdir((model_path)))):
        # Rebuild the architecture, then load this concept's saved weights.
        if latent:
            model = utils.finetune_into_binary_with_features(
                vgg_model.vgg19_bn(pretrained=True),
                num_latent=80,
                on_cpu=multi_gpus)
        else:
            model = utils.finetune_into_binary(
                vgg_model.vgg19_bn(pretrained=True), on_cpu=multi_gpus)
        # Load weights into model
        if multi_gpus:
            # Round-robin the classifiers across the four listed GPUs,
            # remapping each checkpoint onto its assigned device.
            model = utils.WrappedModel(model)
            checkpoint = ch.load(os.path.join(model_path, ccpath),
                                 map_location=gpu_devices[i %
                                                          len(gpu_devices)])
            model = model.to(gpu_devices[i % len(gpu_devices)])
            model.load_state_dict(checkpoint)
        else:
            model.load_state_dict(ch.load(os.path.join(model_path, ccpath)))
# --- Пример #6 (Example #6): pasted-snippet separator from the source page ---
# (the stray "0" beneath it was the page's vote counter, not code)
if __name__ == "__main__":
    import sys
    # CLI: argv[1] = path to model/checkpoint dir, argv[2] = mode flag.
    model_path = sys.argv[1]
    is_latent = int(sys.argv[2])
    # NOTE(review): mapping 1 -> latent=False (everything else -> True) looks
    # inverted relative to the flag's name — confirm the intended CLI
    # contract before changing.
    if is_latent == 1:
        latent = False
    else:
        latent = True
    # Load model
    per_class_concept_latent = 80
    num_total_concepts = 48
    gpu_devices = ['cuda:0', 'cuda:1', 'cuda:2', 'cuda:3']
    # C3: a single shared-latent model, one latent slice per concept.
    if is_latent == 3:
        wrapped_model = vgg_model.vgg19_bn(
            pretrained=False,
            num_latent=per_class_concept_latent * num_total_concepts).cuda()
        checkpoint = ch.load(model_path)
        # Checkpoint was saved from an nn.DataParallel wrapper, hence the
        # `.module` indirection before extracting the state dict.
        wrapped_model.load_state_dict(checkpoint.module.state_dict())
        wrapped_model.eval()
    # C1, C2: meta-classifier over per-concept binary classifiers.
    else:
        # Load meta-classifier
        models = []
        if latent:
            filename = "./meta_classifier_True"
        else:
            filename = "./meta_classifier_False"
        multi_gpus = True
        # NOTE(review): pickle.load is only safe on trusted files.
        clf = pickle.load(open(filename, 'rb'))
        # NOTE(review): this loop's body continues past the end of the
        # visible source — the fragment is truncated here.
        for i, ccpath in tqdm(enumerate(os.listdir(model_path))):