Example No. 1
 # final entries of the 1000-class ImageNet label mapping
 994: 'stinkhorn, carrion fungus',
 995: 'earthstar',
 996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
 997: 'bolete',
 998: 'ear, spike, capitulum',
 999: 'toilet tissue, toilet paper, bathroom tissue'}


if __name__ == '__main__':
    advs_save_path = 'adv_data/'
    r_save_path = 'r_data/'
    model = models.resnet152(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    # preprocessing = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
    # `samples` and `accuracy` here are project-specific variants of the
    # Foolbox helpers (note the extra arguments and the `.raw` access)
    images, labels, file_names = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=1, vis=True))
    file_names = file_names.raw

    prediction = accuracy(fmodel, images, labels, True).raw.cpu().detach().numpy()

    attack = FGSM()
    # attack = LinfPGD()
    # attack = L2DeepFoolAttack()
    epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
    advs, _, _ = attack(fmodel, images, labels, epsilons=epsilons)

    for i in range(len(epsilons)):
        # epsilons[0] == 0.0, so advs[0] are the clean images and r is the
        # difference between them and the adversarial images at epsilons[i]
        r = advs[0] - advs[i]
        attack_labels = accuracy(fmodel, advs[i], labels, True).raw.cpu().detach().numpy()

        imgs_path = advs_save_path + str(epsilons[i]) + '/'
Example No. 2
# nu = np.ones(2)*1000

informative_priors = [a,b,V,m,u]
with open('informative_priors2.pkl', 'wb') as f:
    pickle.dump(informative_priors, f)
"""
nu = d is the least informative (and smallest valid) value, so a large nu
gives a more informative prior. On its own a large nu makes the lam samples
huge, so we also make the scale W small - intuitively saying that we are
sampling lam from a small space.

"the prior mean of Wishart(W,nu) is nu*W, so a reasonable choice for W would
be a prior guess for the precision / nu"
This is what we have done here: divided W by nu. Nice!
"""

# the trick to informative priors - 
# alpha doesn't much matter, apparently
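# (Illustrative aside, not from the original script: alpha is the Dirichlet
# concentration over the mixture weights pi; comparing samples for small vs.
# large alpha shows what varying it would change.)
import numpy as np
rng = np.random.default_rng(0)
print(rng.dirichlet(np.ones(2) * 0.1))   # small alpha: sparse weights, e.g. [~1, ~0]
print(rng.dirichlet(np.ones(2) * 100.))  # large alpha: near-uniform weights [~0.5, ~0.5]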


for i in range(10):
    title = '%d' % i
    Z, pi, mu, lam, r_nk = samples(alpha, beta, W, m, nu, X[0], K)
    plot_GMM(X, mu, lam, pi, centres, covs, K, title, cols=['r', 'b'])

    # the Gaussian over mu_k has covariance (beta_k * lam_k)^-1;
    # draw a dashed ellipse for each component around its mean m[k]
    inv_lam_beta = [inv(beta[k] * lam[k]) for k in range(K)]
    ccols = ['r--', 'b--']
    for k in range(K):
        x, y = draw_ellipse(m[k], inv_lam_beta[k])
        plt.plot(x, y, ccols[k])

Example No. 3

    training_set = CustomDataset(input_path=input_images, target_path=target_images, height=height, width=width, transform=transform)
    train_loader = DataLoader(training_set, batch_size=8, shuffle=True)

    import time
    since = time.time()
    epoch_num = 150
    best_val_acc = 0
    total_loss_val, total_acc_val = [], []
    print('Training')
    for epoch in range(1, epoch_num + 1):
        loss_train, total_loss_train = train(train_loader, model, criterion, optimizer, epoch, device)
        # periodically visualise progress and save a prediction image
        if epoch % 20 == 0 or epoch == 2:
            print(f'Epoch: {epoch}')
            samples(input_images[0], target_images[0], model)
            img = predict(model, input_images[0], device)
            pilimg = Image.fromarray(np.uint8(img))
            pilimg.save('pred/' + str(epoch) + '.png')

    print('Time Taken: ', time.time() - since)
    fig = plt.figure(num=1)
    fig1 = fig.add_subplot(2, 1, 1)
    # fig2 = fig.add_subplot(2, 1, 2)
    fig1.plot(total_loss_train, label='training loss')
    # fig2.plot(total_loss_val, label='validation loss')
    plt.legend()
    plt.show()


    for i in range(5):
Example No. 4
    for start_letter in start_letters:
        print(sample2(category, rnn, start_letter))
####


for iter in range(1, n_iters + 1):
    output, loss = train(*randomTrainingExample())
    total_loss += loss

    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))
        # generate sample names with both the local `samples` helper and the
        # fully explicit `utils.samples` variant
        samples('Russian', 'RUS')
        samples('German', 'GER')
        samples('Spanish', 'SPA')
        samples('Chinese', 'CHI')
        utils.samples('Russian', all_categories, n_letters, all_letters, rnn, 'RUS', start_token=False)
        utils.samples('German', all_categories, n_letters, all_letters, rnn, 'GER', start_token=False)
        utils.samples('Spanish', all_categories, n_letters, all_letters, rnn, 'SPA', start_token=False)
        utils.samples('Chinese', all_categories, n_letters, all_letters, rnn, 'CHI', start_token=False)

    if iter % plot_every == 0:
        all_losses.append(total_loss / plot_every)
        total_loss = 0


######################################################################
# Plotting the Losses
# -------------------
#
# Plotting the historical loss from all_losses shows the network
# learning:
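# A minimal sketch of that plot (assumes matplotlib is available;
# all_losses is the list accumulated in the training loop above):
import matplotlib.pyplot as plt

plt.figure()
plt.plot(all_losses)
plt.xlabel('iteration (in units of plot_every)')
plt.ylabel('average loss')
plt.show()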
Example No. 5
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
    total_robust_acc = 0
    # for index in range(0, 15, 5):
    #     images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=5, index=index))
    #     print(accuracy(fmodel, images, labels, False))
    #
    #     attack = FGSM()
    #     epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
    #     advs, _, success = attack(fmodel, images, labels, epsilons=epsilons)
    #
    #     robust_accuracy = 1 - success.float32().mean(axis=-1)
    #     total_robust_acc = total_robust_acc + robust_accuracy
    #     # for eps, acc in zip(epsilons, robust_accuracy):
    #     #     print(eps, acc.item())
    # for eps, acc in zip(epsilons, total_robust_acc/3):  # 3 batches in the loop above
    #     print(eps, acc.item())

    images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=20))
    print(accuracy(fmodel, images, labels, False))

    # attack = LinfPGD()
    # attack = L2DeepFoolAttack()
    attack = FGSM()
    epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
    advs, _, success = attack(fmodel, images, labels, epsilons=epsilons)

    # success has shape (len(epsilons), batch_size); robust accuracy at each
    # epsilon is the fraction of inputs the attack failed to flip
    robust_accuracy = 1 - success.float32().mean(axis=-1)
    total_robust_acc = total_robust_acc + robust_accuracy
    for eps, acc in zip(epsilons, robust_accuracy):
        print(eps, acc.item())