# NOTE: the imports and the function header below are not part of the original
# snippet; they are assumed here (Theano/Lasagne) so the example is self-contained.
import theano.tensor as T
import lasagne.layers as ll
import lasagne.nonlinearities as nl
from lasagne import init
from lasagne.layers import Conv2DLayer as ConvLayer, MaxPool2DLayer

from nets import objectives, optpolicy
from experiments.utils import run_experiment


def net_lenet5(input_shape, nclass):
    # Symbolic Theano variables: a 4D image batch and integer class targets.
    input_x = T.tensor4("input")
    target_y = T.vector("target", dtype='int32')
    Winit = init.Normal()  # kept from the original; unused, each layer builds its own initializer

    # LeNet-5-style stack: two conv/pool blocks followed by two dense layers.
    net = ll.InputLayer(input_shape, input_x)

    net = ConvLayer(net, 20, 5, W=init.Normal())   # 20 filters, 5x5
    net = MaxPool2DLayer(net, 2)

    net = ConvLayer(net, 50, 5, W=init.Normal())   # 50 filters, 5x5
    net = MaxPool2DLayer(net, 2)

    net = ll.DenseLayer(net, 500, W=init.Normal())
    net = ll.DenseLayer(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    # Return the network, the symbolic input/target, and a constant passed through to the caller.
    return net, input_x, target_y, 1


# Experiment settings: 200 epochs, batches of 100, verbose logging, MNIST data.
num_epochs, batch_size, verbose, dataset = 200, 100, 1, 'mnist'
# Linear learning-rate schedule (a sketch of such a policy follows this example).
optp = lambda epoch: optpolicy.lr_linear(epoch, 1e-4)
arch = net_lenet5

net = run_experiment(dataset,
                     num_epochs,
                     batch_size,
                     arch,
                     objectives.sgvlb,      # stochastic-gradient variational lower bound
                     verbose,
                     optp,                  # learning-rate schedule
                     optpolicy.rw_linear,   # regularisation-weight schedule
                     optimizer='adam',
                     da=True)
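
# A minimal sketch of a linearly decaying learning-rate policy, included because
# optpolicy.lr_linear itself is not shown in this snippet (the two examples even
# call it with different argument orders). The name, signature, and decay-to-zero
# behaviour below are assumptions, not the library's actual implementation.
def lr_linear_sketch(epoch, lr_base, num_epochs=200):
    """Decay the learning rate linearly from lr_base towards 0 over num_epochs."""
    return lr_base * max(0.0, 1.0 - float(epoch) / num_epochs)
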
Example No. 2
import warnings
from nets import optpolicy
import experiments.utils
from experiments.utils import run_experiment
import numpy as np
import os
from experiments.lenet.lenet import FC_Net, FC_BinDO_Net
from experiments.lenet.lenet import WN_BinDO_Net, WN_Net
import torch
import torch.nn as nn

# Global experiment settings: MNIST, cross-entropy loss on GPU,
# 200 epochs of batches of 100, verbose logging.
dataset = 'mnist'
criterion = nn.CrossEntropyLoss().cuda()
num_epochs, batch_size, verbose = 200, 100, 1

# Linear learning-rate schedule over num_epochs epochs.
optpol_linear = lambda epoch: optpolicy.lr_linear(num_epochs, epoch, 1e-3)

# Architecture and noise settings: sweep over the noise magnitudes in magn_vars
# rather than over alpha values.
arch = WN_Net
noise_type = None
alpha = None
# alphas = np.logspace(np.log10(0.01), np.log10(3), 8)
noise_magnitude = True
magn_vars = [0.01, 0.05, 0.1, 0.5, 1, 2, 5, 10]

# Logging names and the experiment grid: several training-set sizes,
# each configuration repeated ave_times times for averaging.
folder_name = 'wn_magn_batch'
filename = 'wn_magn_batch'
trainset_sizes = [100, 500, 1000, 5000, 10000, 50000]
ave_times = 50

# Make sure the log directory exists before any results are written.
if not os.path.exists('./experiments/logs'):
    os.mkdir('./experiments/logs')
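
# How these pieces are typically wired together in PyTorch (a sketch, not part of
# the original source): the model is trained with the cross-entropy criterion
# above, using Adam whose learning rate is reset from optpol_linear each epoch.
# WN_Net's constructor arguments are not shown here, so the model and data loader
# are passed in; optpol_linear(epoch) is assumed to return the learning rate.
def train_sketch(model, loader):
    model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    for epoch in range(num_epochs):
        for group in optimizer.param_groups:
            group['lr'] = optpol_linear(epoch)  # per-epoch learning-rate schedule
        for x, y in loader:
            x, y = x.cuda(), y.cuda()
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
    return model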