Example 1
def run(args):
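    # Sweep IDP compression ratios for two VGG variants (all-one and linear
    # coefficient schemes) and plot test accuracy against IDP percentage.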
    train, test = util.get_dataset(args.dataset)
    names = ['all-one (standard)', 'linear']
    colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
    models = [
        VGG.VGG(10, cg.uniform, 'all'),
        VGG.VGG(10, cg.linear, 'slow_exp')
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    ratios_dict = {}
    for name, model in zip(names, models):
        util.load_or_train_model(model, train, test, args)
        acc_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
        ratios_dict[name] = [100. * cr for cr in comp_ratios]

    filename = "VGG_{}".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            title='VGG (CIFAR-10)',
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            ylim=(90, 100))
Example 2
def run(args):
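    # Neural style transfer: load the content and style images (optionally matching
    # color histograms or transferring luminance only via a YIQ split), then optimize
    # the output image with L-BFGS using either the MRF or the NeuralStyle objective.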
    if args.out_dir is not None:
        if not os.path.exists(args.out_dir):
            try:
                os.mkdir(args.out_dir)
            except OSError:
                print('cannot make directory {}'.format(args.out_dir))
                exit()
        elif not os.path.isdir(args.out_dir):
            print('file path {} exists but is not a directory'.format(args.out_dir))
            exit()
    if args.type == 'vgg19':
        vgg = VGG19()
    else:
        vgg = VGG()
    content_image = open_and_resize_image(args.content, args.width, vgg)
    print('loading content image completed')
    style_image = open_and_resize_image(args.style, args.width, vgg)
    if args.match_color_histogram:
        style_image = util.match_color_histogram(style_image, content_image)
    if args.luminance_only:
        content_image, content_iq = util.split_bgr_to_yiq(content_image)
        style_image, style_iq = util.split_bgr_to_yiq(style_image)
        content_mean = np.mean(content_image, axis=(1,2,3), keepdims=True)
        content_std = np.std(content_image, axis=(1,2,3), keepdims=True)
        style_mean = np.mean(style_image, axis=(1,2,3), keepdims=True)
        style_std = np.std(style_image, axis=(1,2,3), keepdims=True)
        style_image = (style_image - style_mean) / style_std * content_std + content_mean
    print('loading style image completed')
    serializers.load_hdf5(args.model, vgg)
    print('loading neural network model completed')
    optimizer = LBFGS(args.lr)
    content_layers = args.content_layers.split(',')
    style_layers = args.style_layers.split(',')

    def on_epoch_done(epoch, x, losses):
        if (epoch + 1) % args.save_iter == 0:
            image = cuda.to_cpu(x.data)
            if args.luminance_only:
                image = util.join_yiq_to_bgr(image, content_iq)
            image = vgg.postprocess(image[0], output_type='RGB').clip(0, 255).astype(np.uint8)
            Image.fromarray(image).save(os.path.join(args.out_dir, 'out_{0:04d}.png'.format(epoch + 1)))
            print('epoch {} done'.format(epoch + 1))
            print('losses:')
            label_width = max(len(name) for name, _ in losses)
            for name, loss in losses:
                print('  {0:{width}s}: {1:f}'.format(name, loss, width=label_width))

    if args.method == 'mrf':
        model = MRF(vgg, optimizer, args.content_weight, args.style_weight, args.tv_weight, content_layers, style_layers, args.resolution_num, args.gpu, initial_image=args.initial_image, keep_color=args.keep_color)
    else:
        model = NeuralStyle(vgg, optimizer, args.content_weight, args.style_weight, args.tv_weight, content_layers, style_layers, args.resolution_num, args.gpu, initial_image=args.initial_image, keep_color=args.keep_color)
    out_image = model.fit(content_image, style_image, args.iter, on_epoch_done)
    out_image = cuda.to_cpu(out_image.data)
    if args.luminance_only:
        out_image = util.join_yiq_to_bgr(out_image, content_iq)
    image = vgg.postprocess(out_image[0], output_type='RGB').clip(0, 255).astype(np.uint8)
    Image.fromarray(image).save(os.path.join(args.out_dir, 'out.png'))
Example 3
def run(args):
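    # Train a multi-profile VGG, then sweep IDP compression ratios separately
    # for each profile and plot the resulting accuracy curves.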
    train, test = util.get_dataset(args.dataset)
    names = ['linear']
    colors = [vz.colors.linear_sm, vz.colors.linear_lg]
    models = [VGG.VGGMulti(10, cg.linear, profiles=[(0, 2), (2, 10)])]
    comp_ratios = np.linspace(0.1, 1, 20)
    acc_dict = {}
    ratios_dict = {}
    key_names = []
    for name, model in zip(names, models):
        util.train_model_profiles(model, train, test, args)
        for profile in range(len(model.profiles)):
            key = name + '-' + str(profile + 1)
            key_names.append(key)
            acc_dict[key] = util.sweep_idp(model,
                                           test,
                                           comp_ratios,
                                           args,
                                           profile=profile)
            ratios_dict[key] = [100. * cr for cr in comp_ratios]

    filename = "VGG_{}_multi".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            key_names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            title='VGG-16 (CIFAR-10)',
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)')
Example 4
    gpu = 0
    iter = 150
    save_iter = 200
    lr = 1.0
    content_weight = 5
    style_weight = 100
    tv_weight = 1e-3
    width = 500
    content_layers = '3_3,4_3'
    style_layers = '1_2,2_2,3_3,4_3'
    initial_image = 'random'
    resolution_num = 1
    keep_color = False


vgg = VGG()
serializers.load_hdf5(args.model, vgg)
print('loading neural network model completed')
optimizer = LBFGS(args.lr)


def open_and_resize_image(path, target_width, model):
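    # Resize to the target width while preserving the aspect ratio, then
    # preprocess for the model and add a leading batch dimension.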
    image = Image.open(path).convert('RGB')
    width, height = image.size
    target_height = int(round(float(height * target_width) / width))
    image = image.resize((target_width, target_height), Image.BILINEAR)
    return np.expand_dims(
        model.preprocess(np.asarray(image, dtype=np.float32),
                         input_type='RGB'), 0)

Example 5
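        # Mix three checkpoints using the grid weights X[i, j], Y[i, j], Z[i, j].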
        convex_hull_weights = sum_weights([
            multiply_weights(weight_dict_1, X[i, j]),
            multiply_weights(weight_dict_2, Y[i, j]),
            multiply_weights(weight_dict_3, Z[i, j])
        ])

        criterion = nn.CrossEntropyLoss(reduction='sum')

        def test(model, test_loader):
            model.eval()
            test_loss = 0
            correct = 0
            with torch.no_grad():
                for data, target in test_loader:
                    data, target = data.cuda(), target.cuda()
                    output = model(data)
                    test_loss += criterion(output, target).item()
                    pred = output.max(1, keepdim=True)[1]
                    correct += pred.eq(target.view_as(pred)).sum().item()

            test_loss /= len(test_loader.dataset)
            return test_loss

        net = VGG('VGG16').cuda()
        net.load_state_dict(convex_hull_weights)
        Z_[i].append(test(net, test_loader))

np.save('./plots/X_cifar', X)
np.save('./plots/Y_cifar', Y)
np.save('./plots/Z_cifar', Z_)
Example 6
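# Training configuration read from the parsed command-line arguments.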
LR = args.lr
RECORD_EPOCH = args.record_epoch
NET = args.vgg_net
ONE_HOT = args.one_hot
SHOW_PIC_NUM = args.show_picture_num
DATASET_PATH = args.dataset_path
RGB = args.rgb
CSV = args.csv
SHOW_VALID_PIC = args.show_valid


DATASET_PATH, TRAIN_PATH, VALID_PATH = set_path(DEVICE)
# print(DATASET_PATH)

if torch.cuda.is_available():
    net = VGG(NET).cuda()
else:
    net = VGG(NET)

params = net.parameters()

if ONE_HOT:
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=LR)
else:
    loss_func = nn.BCELoss()
    optimizer = optim.RMSprop(net.parameters(), lr=LR, alpha=0.9)


def train(epoch=10, batch_size=10, dataset_path=None, one_hot=False):
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    return test_loss


global_vals = []
for i in trange(3):
    # natural
    # weight_dict = torch.load('model_weights/vgg_weights_{}.pth'.format(i),
    #                          map_location='cpu')
    # random
    weight_dict = torch.load(
        'model_weights/vgg_random_weights_{}.pth'.format(i),
        map_location='cpu')
    net = VGG('VGG16').cuda()
    net.load_state_dict(weight_dict)
    I_w = test(net, test_loader)

    vals = []
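    # Draw a random Gaussian direction in the last two weight tensors and compute its norm.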
    for tick in trange(20):
        weight_dict_delta, delta = deepcopy(weight_dict),\
                                   deepcopy(weight_dict)

        norm = 0
        for key in list(weight_dict_delta.keys())[-2:]:
            delta[key] = torch.randn(delta[key].size())
            norm += delta[key].norm().pow(2)
        norm = norm.pow(0.5)

        I_w_delta, r = I_w, 0.
Example 8
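# CIFAR-10 test split and loader used for evaluation.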
testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

for seed in range(3):

    net = VGG('VGG16').cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=1e-2)

    def train(epoch):
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda().long()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
Example 9
    # model = VGG(in_size, out_num)
    # train_loader, test_loader = load_dataset(dataRoot, in_size)
    # optimizer = optim.SGD(model.parameters(), lr=the_lr, momentum=0.9)
    #
    # model.load_state_dict(torch.load('{}.pth.tar'.format(icon)))
    # trained_model, val_acc_history, loss, acc = train_model(model, train_loader, test_loader, optimizer, 5)
    # print('{}_loss_{:2f}__acc_{:2f}.pth.tar'.format(icon, loss, acc))
    # torch.save(trained_model.state_dict(), '{}.pth.tar'.format(icon))
    # # #########################################################################################################
    in_size = 64
    icon = 'gun_butt'

    dataRoot = os.path.join("dataSets", icon, "train")
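    # One output class per entry in the training directory.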
    out_num = len(os.listdir(dataRoot))

    model = VGG(in_size, out_num)
    train_loader, test_loader = load_dataset(dataRoot, in_size)
    optimizer = optim.SGD(model.parameters(), lr=the_lr, momentum=0.9)

    model.load_state_dict(torch.load('{}.pth.tar'.format(icon)))
    trained_model, val_acc_history, loss, acc = train_model(model, train_loader, test_loader, optimizer, 10)
    print('{}_loss_{:2f}__acc_{:2f}.pth.tar'.format(icon, loss, acc))
    torch.save(trained_model.state_dict(), '{}.pth.tar'.format(icon))
    #
    # # ########################################################################################################
    # in_size = 64
    # icon = 'gun_name'
    #
    # dataRoot = os.path.join("dataSets", icon, "train")
    # out_num = len(os.listdir(dataRoot))
    #
Example 10
from torchvision import transforms
import torch

from PIL import Image
import os
import numpy as np

from net import VGG

in_size = 64
out_num = 33

model = VGG(in_size, out_num)
model.load_state_dict(torch.load('loss_0.001207__acc_5.000000.pth.tar'))
model.eval()
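# Load the trained VGG checkpoint for inference; i_name maps output indices
# to label names (ordering assumed to match training).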

i_name = [
    "ang",
    "burst2",
    "burst3",
    "com_ar",
    "com_sm",
    "fla_ar",
    "fla_sm",
    "full",
    "hal",
    "in_tab",
    "las",
    "lig",
    "single",
    "sto",