# Evaluate the loss at one grid point (X[i, j], Y[i, j], Z[i, j]) of the convex
# combination of three sets of VGG16 weights.
convex_hull_weights = sum_weights([
    multiply_weights(weight_dict_1, X[i, j]),
    multiply_weights(weight_dict_2, Y[i, j]),
    multiply_weights(weight_dict_3, Z[i, j]),
])

criterion = nn.CrossEntropyLoss(reduction='sum')


def test(model, test_loader):
    # Average cross-entropy of the model over the test set.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    return test_loss


net = VGG('VGG16').cuda()
net.load_state_dict(convex_hull_weights)
Z_[i].append(test(net, test_loader))

np.save('./plots/X_cifar', X)
np.save('./plots/Y_cifar', Y)
np.save('./plots/Z_cifar', Z_)
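# multiply_weights and sum_weights are not defined in this fragment. A minimal
# sketch of what they might look like, assuming both operate elementwise on
# model state dicts (hypothetical, not the original implementation):
def multiply_weights(weight_dict, scalar):
    # Scale every tensor in a state dict by a scalar coefficient.
    return {key: value * scalar for key, value in weight_dict.items()}


def sum_weights(weight_dicts):
    # Elementwise sum of a list of state dicts sharing the same keys.
    summed = {key: value.clone() for key, value in weight_dicts[0].items()}
    for weight_dict in weight_dicts[1:]:
        for key, value in weight_dict.items():
            summed[key] = summed[key] + value
    return summed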
    test_loss /= len(test_loader.dataset)
    return test_loss


global_vals = []
for i in trange(3):
    # natural
    # weight_dict = torch.load('model_weights/vgg_weights_{}.pth'.format(i),
    #                          map_location='cpu')
    # random
    weight_dict = torch.load(
        'model_weights/vgg_random_weights_{}.pth'.format(i),
        map_location='cpu')
    net = VGG('VGG16').cuda()
    net.load_state_dict(weight_dict)
    I_w = test(net, test_loader)

    vals = []
    for tick in trange(20):
        # Draw a random direction restricted to the last two (classifier)
        # tensors of the state dict and compute its norm.
        weight_dict_delta, delta = deepcopy(weight_dict), deepcopy(weight_dict)
        norm = 0
        for key in list(weight_dict_delta.keys())[-2:]:
            delta[key] = torch.randn(delta[key].size())
            norm += delta[key].norm().pow(2)
        norm = norm.pow(0.5)

        # Find the perturbation radius r at which the test loss shifts by 0.05.
        I_w_delta, r = I_w, 0.
        while abs(I_w - I_w_delta) < 0.05:
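            # Hypothetical continuation (not part of the original fragment):
            # one plausible way the radius search could proceed is to grow r,
            # add the normalised direction to the last two tensors, and
            # re-evaluate. The step size 0.1 and the bookkeeping below are
            # assumptions.
            r += 0.1
            for key in list(weight_dict_delta.keys())[-2:]:
                weight_dict_delta[key] = weight_dict[key] + r * delta[key] / norm
            net.load_state_dict(weight_dict_delta)
            I_w_delta = test(net, test_loader)
        vals.append(r)
    global_vals.append(vals)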
# model.load_state_dict(torch.load('{}.pth.tar'.format(icon)))
# trained_model, val_acc_history, loss, acc = train_model(model, train_loader, test_loader, optimizer, 5)
# print('{}_loss_{:2f}__acc_{:2f}.pth.tar'.format(icon, loss, acc))
# torch.save(trained_model.state_dict(), '{}.pth.tar'.format(icon))
#
# #########################################################################################################

in_size = 64
icon = 'gun_butt'

dataRoot = os.path.join("dataSets", icon, "train")
out_num = len(os.listdir(dataRoot))

model = VGG(in_size, out_num)
train_loader, test_loader = load_dataset(dataRoot, in_size)
optimizer = optim.SGD(model.parameters(), lr=the_lr, momentum=0.9)

model.load_state_dict(torch.load('{}.pth.tar'.format(icon)))
trained_model, val_acc_history, loss, acc = train_model(
    model, train_loader, test_loader, optimizer, 10)
print('{}_loss_{:2f}__acc_{:2f}.pth.tar'.format(icon, loss, acc))
torch.save(trained_model.state_dict(), '{}.pth.tar'.format(icon))

#
# ########################################################################################################
# in_size = 64
# icon = 'gun_name'
#
# dataRoot = os.path.join("dataSets", icon, "train")
# out_num = len(os.listdir(dataRoot))
#
# model = VGG(in_size, out_num)
# train_loader, test_loader = load_dataset(dataRoot, in_size)
# optimizer = optim.SGD(model.parameters(), lr=the_lr, momentum=0.9)
#
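# load_dataset and train_model are defined elsewhere in the project. A minimal
# sketch of load_dataset, assuming dataRoot contains one subdirectory per class
# (hypothetical, not the original implementation):
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms


def load_dataset(data_root, in_size, batch_size=32):
    transform = transforms.Compose([
        transforms.Resize((in_size, in_size)),
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder(data_root, transform=transform)
    # Hold out roughly 10% of the images for evaluation.
    test_len = max(1, len(dataset) // 10)
    train_set, test_set = random_split(
        dataset, [len(dataset) - test_len, test_len])
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader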
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda().long()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            progress_bar(
                batch_idx, len(testloader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (batch_idx + 1), 100. * correct / total,
                 correct, total))


# Train for 150 epochs, then save and reload the final weights.
start_epoch = 0
for epoch in range(start_epoch, start_epoch + 150):
    train(epoch)
    test(epoch)

torch.save(net.state_dict(),
           './model_weights/vgg_random_weights_{}.pth'.format(seed))
net.load_state_dict(
    torch.load('./model_weights/vgg_random_weights_{}.pth'.format(seed)))
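# train(epoch) is defined elsewhere in the script. A minimal sketch of a
# standard training epoch, assuming a trainloader and an SGD optimizer exist
# alongside the criterion used in test (hypothetical, not the original
# implementation):
def train(epoch):
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda().long()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()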
from torchvision import transforms
import torch
from PIL import Image
import os
import numpy as np

from net import VGG

in_size = 64
out_num = 33

model = VGG(in_size, out_num)
model.load_state_dict(torch.load('loss_0.001207__acc_5.000000.pth.tar'))
model.eval()

i_name = [
    "ang", "burst2", "burst3", "com_ar", "com_sm", "fla_ar", "fla_sm",
    "full", "hal", "in_tab", "las", "lig", "single", "sto",