# Ejemplo n.º 1 (scraped example separator; vote count: 0)
def get_network(args, depth=10, width=10):
    """Build and return the network selected by the CLI arguments.

    Args:
        args: parsed arguments; reads ``args.task`` ('cifar10' | 'cifar100'),
            ``args.net`` (architecture name), ``args.batch_norm`` (bool) and
            ``args.gpu`` (bool).
        depth: depth passed through to the resnet constructor.
        width: width passed through to the resnet constructor.

    Returns:
        The instantiated network, moved to the GPU when ``args.gpu`` is set.

    Exits the process with a message for an unsupported task or network name.
    """
    if args.task == 'cifar10':
        nclass = 10
    elif args.task == 'cifar100':
        nclass = 100
    else:
        # Fail fast with a message; previously an unknown task left `nclass`
        # unbound and the constructors below raised a NameError instead.
        print('the task name you have entered is not supported yet')
        sys.exit()

    # Yang added non-bn vggs: each entry maps to (plain, batch-norm) factories.
    vgg_factories = {
        'vgg11': (vgg11, vgg11_bn),
        'vgg13': (vgg13, vgg13_bn),
        'vgg16': (vgg16, vgg16_bn),
        'vgg19': (vgg19, vgg19_bn),
    }

    if args.net in vgg_factories:
        plain, with_bn = vgg_factories[args.net]
        net = (with_bn if args.batch_norm else plain)(num_classes=nclass)
    elif args.net == 'resnet':
        net = resnet(num_classes=nclass, depth=depth, width=width)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:  # use_gpu
        net = net.cuda()

    return net

# Run the collected face crops through the trained VGG13 emotion classifier
# and print the predicted emotion label for each face.
# NOTE(review): `faces`, `batch_size` and `emotions` are defined earlier in
# the original script (not visible in this chunk).
X_test = faces

print(X_test)

model = vgg13()

# Load the pre-trained Keras weights for the emotion model.
model.load_weights('model_vgg_13_59.h5')

predictions = model.predict(X_test, batch_size=batch_size, verbose=1)

# Iterate the predictions directly instead of indexing via range(len(...)).
for prediction in predictions:
    print(emotions[np.argmax(prediction)])
# Ejemplo n.º 3 (scraped example separator; vote count: 0)
# Device selection: honour --gpu_index only when CUDA was requested.
if args.cuda:
    gpu_index = args.gpu_index
    torch.cuda.set_device(gpu_index)
    device = torch.device('cuda')
else:
    gpu_index = None
    device = torch.device('cpu')

train_data, _, channels, classes = data_loader.get_train_data(
    args.dataset,
    args.data_root,
    args.batch_size,
    normalize=args.data_normalization)

# classifier = models.get_classifier(channels, classes)
classifier = models.vgg13()
# BUG FIX: test `is not None`, not truthiness -- GPU index 0 (the common
# default) is falsy and previously left the classifier silently on the CPU.
if gpu_index is not None:
    classifier = classifier.cuda(gpu_index)

if args.generator:
    # Load a pre-trained generator checkpoint and freeze it in eval mode.
    generator = models.get_generator(args.nz, channels, args.ngf)
    generator.load_state_dict(torch.load(args.generator))
    if gpu_index is not None:
        generator = generator.cuda(gpu_index)
    generator = generator.eval()
    print('Using pre-trained generator. Generator loaded from:\n%s' %
          args.generator)

optimizer = optim.Adam(classifier.parameters(), lr=args.lr, betas=(0.5, 0.999))

# Running statistics for the training loop (loss / real / fake accumulators).
run_loss = run_real = run_fake = 0.
# Ejemplo n.º 4 (scraped example separator; vote count: 0)
# Script setup: seed the RNGs, load the target dataset, and build the VGG13
# classifier, optionally with a center-loss regulariser.
print(args)
# Only enable CUDA when it is both requested and actually available.
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Random Seed: ", args.seed)
torch.manual_seed(args.seed)

if args.cuda:
    torch.cuda.manual_seed(args.seed)

# pin_memory / extra workers only help when batches are shipped to the GPU.
# NOTE(review): kwargs is unused in this visible chunk -- presumably passed
# to the DataLoader construction later in the script; confirm.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

print('load data: ', args.dataset)
train_loader, test_loader = data_loader.getTargetDataSet(
    args.dataset, args.batch_size, args.imageSize, args.dataroot)

print('Load model')
model = models.vgg13()
print(model)

if args.cuda:
    model.cuda()

# Center loss addition
if args.centerloss:
    classes = 10  # default for CIFAR & SVHN
    dim = 10  # feature dimension fed to the center loss
    centerloss = CenterLoss(num_classes=classes,
                            feat_dim=dim,
                            use_gpu=args.cuda)
    print(
        'Center loss component | Classes: {} | Features: {} | GPU: {}'.format(
            classes, dim, args.cuda))
# Ejemplo n.º 5 (scraped example separator; vote count: 0)
def _max_softmax_scores(model, loader, use_cuda, gpu_index):
    """Collect the max softmax probability for every sample in *loader*.

    Args:
        model: classifier in eval mode producing class logits.
        loader: iterable of (data, label) batches; labels are ignored.
        use_cuda: move each batch to the GPU before the forward pass.
        gpu_index: GPU to use when ``use_cuda`` is set (may be None on CPU).

    Returns:
        1-D numpy array of per-sample confidence scores in [0, 1].
    """
    scores = np.array([])
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        for data, _ in loader:
            if use_cuda:
                data = data.cuda(gpu_index)

            logits = model(data)
            probs = F.softmax(logits, dim=1)
            max_probs, _ = torch.max(probs, dim=1)
            max_probs = max_probs.detach().cpu()
            scores = np.concatenate((scores, max_probs), axis=0)
    return scores


def main():
    """Plot predictive-probability distributions for one in-distribution
    dataset and one or more out-of-distribution datasets, using a trained
    VGG13 checkpoint."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_root', required=True, help='path to datasets')
    parser.add_argument('--data_normalization', action='store_true', help='normalize datasets')
    parser.add_argument('--in_dataset', required=True, help='dataset used for training: svhn | cifar10')
    parser.add_argument('--out_datasets', required=True, nargs='+', help='datasets used for testing')
    parser.add_argument('--model_chp', required=True, help='path to VGG13 model checkpoint')
    parser.add_argument('--out_folder', required=True, help='folder to output images to')
    parser.add_argument('--cuda', action='store_true', help='use GPU')
    parser.add_argument('--gpu_index', type=int, default=0, help='GPU used for training, defaults to: "0"')
    args = parser.parse_args()

    batch_size = 128
    # Dataset keys -> human-readable plot labels.
    mappings = {
        'svhn': 'SVHN',
        'cifar10': 'CIFAR-10',
        'imagenet': 'Imagenet',
        'lsun': 'LSUN'
    }

    gpu_index = args.gpu_index if args.cuda else None

    # Only the held-out test split of the in-distribution data is needed.
    _, in_test_data, _, _ = data_loader.get_train_data(
        args.in_dataset, args.data_root, batch_size, normalize=args.data_normalization
    )

    out_test_data = []
    for dataset in args.out_datasets:
        test_data, _, _ = data_loader.get_test_data(
                dataset, args.data_root, batch_size, normalize=args.data_normalization
            )
        out_test_data.append((dataset, test_data))

    model = models.vgg13()
    model.load_state_dict(torch.load(args.model_chp))
    if args.cuda:
        model = model.cuda(gpu_index)
    model.eval()

    # In-dataset: score and plot (ideal-line overlay disabled).
    scores_in = _max_softmax_scores(model, in_test_data, args.cuda, gpu_index)

    pd_in_path = os.path.join(args.out_folder, 'pd_in_%s.png' % args.in_dataset)
    plot_predictive_probability_distribution(
        scores_in, 'red', mappings[args.in_dataset], pd_in_path, plot_ideal=False
    )
    print('Plot for %s saved to: %s' % (args.in_dataset, pd_in_path))

    # Out-datasets: same scoring loop, one plot per dataset.
    for dataset, test_loader in out_test_data:
        scores_out = _max_softmax_scores(model, test_loader, args.cuda, gpu_index)

        pd_out_path = os.path.join(args.out_folder, 'pd_out_%s.png' % dataset)
        plot_predictive_probability_distribution(
            scores_out, 'blue', mappings[dataset], pd_out_path
        )
        print('Plot for %s saved to: %s' % (dataset, pd_out_path))
# Script setup for a GAN-augmented training run: seed the RNGs, load the
# target dataset, and instantiate the classifier plus DCGAN generator and
# discriminator.
print(args)
# Only enable CUDA when it is both requested and actually available.
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Random Seed: ", args.seed)
torch.manual_seed(args.seed)

if args.cuda:
    torch.cuda.manual_seed(args.seed)

# NOTE(review): kwargs appears unused in this visible chunk -- presumably
# passed to a DataLoader later; confirm against the full script.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

print('load data: ', args.dataset)
train_loader, test_loader = data_loader.getTargetDataSet(
    args.dataset, args.batch_size, args.imageSize, args.dataroot)

print('Load model')
model = models.vgg13(num_classes=args.num_classes)
print(model)

print('load GAN')
nz = 100  # dimensionality of the generator's latent noise vector
netG = models.Generator(1, nz, 64, 3)  # ngpu, nz, ngf, nc
netD = models.Discriminator(1, 3, 64)  # ngpu, nc, ndf
# Initial setup for GAN
real_label = 1
fake_label = 0
criterion = nn.BCELoss()
# Fixed noise batch so generated samples are comparable across epochs.
fixed_noise = torch.Tensor(64, nz, 1, 1).normal_(0, 1)

if args.cuda:
    model.cuda()
    netD.cuda()
    # NOTE(review): netG and fixed_noise are NOT moved to the GPU here,
    # while model and netD are -- confirm this happens later in the script,
    # otherwise generator calls on CUDA inputs will fail with a device
    # mismatch.
# Ejemplo n.º 7 (scraped example separator; vote count: 0)
# Announce the evaluation run and where its in/out-distribution result
# files will be written (paths were assembled earlier in the script).
print('Run associated with the test: %s' % name)
print('Training data (in-distribution): %s' % args.in_dataset)
print('Test data (out-distributions): {}'.format(args.out_datasets))
print('Saving in-distribution performance results to:\n%s' % f_in_path)
print('Saving out-distribution performance results to:\n%s' % f_out_path)

with open(f_in_path, 'w') as report_in, open(f_out_path, 'w') as report_out:
    # Headers
    report_in.write('Checkpoint\tTest Accuracy\n')
    report_out.write(
        'Checkpoint\tOut Dataset\tTNR at TPR 95%\tAUROC\tDetection Accuracy\tAUPR In\tAUPR Out\n'
    )

    for chp_no, chp in checkpoints:
        # Model from checkpoint
        model = models.vgg13()  # Added
        #model = models.get_classifier(channels, classes)
        model.load_state_dict(torch.load(join(args.chp_folder, chp)))
        if args.cuda:
            model = model.cuda(gpu_index)
        model.eval()

        #########################
        # In-distribution tests #
        #########################
        tmp_rpt_in = []

        correct = 0
        total = 0

        for data, target in in_test_data: