Code example #1
import glob

# find the push-stage checkpoint with the highest test accuracy
# (construct_PPNet, load_model_from_train_state, test, config and model_dir are
#  assumed to be defined or imported elsewhere in the project)
score_model_with_max_push_acc = -1
path_to_model_with_max_push_acc = None
for i in config.push_epochs:
    if i >= config.push_start and i < config.num_train_epochs:
        model_push_path = glob.glob(model_dir + f"/{i}*")[0]
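        # the test accuracy is assumed to be encoded at the end of the checkpoint
        # filename, just before the extension (e.g. ".../10push.0.7432.pth"), so
        # the two dot-separated fields before the extension are rejoined below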
        score = float(".".join(model_push_path.split(".")[-3:-1]))
        if score >= score_model_with_max_push_acc:
            score_model_with_max_push_acc = score
            path_to_model_with_max_push_acc = model_push_path

ppnet_test = construct_PPNet(
    base_architecture=config.base_architecture,
    pretrained=config.pretrained,
    img_size=config.img_size,
    prototype_shape=config.prototype_shape,
    num_classes=config.num_classes,
    prototype_activation_function=config.prototype_activation_function,
    add_on_layers_type=config.add_on_layers_type,
    batch_norm_features=config.batch_norm_features)
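# (assumption, following the upstream ProtoPNet code) prototype_shape is expected
# to be (num_prototypes, channels, 1, 1) and prototype_activation_function is
# typically 'log' or 'linear'; all of these values are read from config here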

print('load model from ' + path_to_model_with_max_push_acc)
load_model_from_train_state(path_to_model_with_max_push_acc, ppnet_test)

ppnet_test = ppnet_test.cuda()

accu = test(model=ppnet_test,
            dataloader=test_loader,
            config=config,
            log_writer=log_writer,
            step=step)
Code example #2
                                          batch_size=test_batch_size,
                                          shuffle=False,
                                          num_workers=4,
                                          pin_memory=False)

# TODO: look more carefully into torch.utils.data.distributed.DistributedSampler(train_dataset) for distributed training
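# A rough sketch (an assumption, not part of this script): with torch.distributed
# initialized, the training loader could be rebuilt around a DistributedSampler,
# which replaces shuffle=True and gives each process its own shard of the data:
#
#   train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
#   train_loader = torch.utils.data.DataLoader(train_dataset,
#                                              batch_size=train_batch_size,
#                                              shuffle=False,
#                                              sampler=train_sampler,
#                                              num_workers=4,
#                                              pin_memory=False)
#   # train_sampler.set_epoch(epoch) would then be called at the start of each epoch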
log('training set size: {0}'.format(len(train_loader.dataset)))
log('push set size: {0}'.format(len(train_push_loader.dataset)))
log('test set size: {0}'.format(len(test_loader.dataset)))
log('batch size: {0}'.format(train_batch_size))

# construct the model
ppnet = model.construct_PPNet(
    base_architecture=base_architecture,
    pretrained=True,
    img_size=img_size,
    prototype_shape=prototype_shape,
    num_classes=num_classes,
    prototype_activation_function=prototype_activation_function,
    add_on_layers_type=add_on_layers_type)
#if prototype_activation_function == 'linear':
#    ppnet.set_last_layer_incorrect_connection(incorrect_strength=0)
ppnet = ppnet.cuda()
ppnet_multi = torch.nn.DataParallel(ppnet)
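# DataParallel replicates the model across the visible GPUs and splits each batch
# between them; the bare `ppnet` handle is kept because the optimizer specs below
# refer to its submodules (ppnet.features, ppnet.add_on_layers, ...) directly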
class_specific = True

# define optimizer
from settings import joint_optimizer_lrs, joint_lr_step_size
joint_optimizer_specs = [
    {'params': ppnet.features.parameters(), 'lr': joint_optimizer_lrs['features'], 'weight_decay': 1e-3},  # biases are now also being regularized
    {'params': ppnet.add_on_layers.parameters(), 'lr': joint_optimizer_lrs['add_on_layers'], 'weight_decay': 1e-3},
    {'params': ppnet.prototype_vectors, 'lr': joint_optimizer_lrs['prototype_vectors']},