Example #1
File: util.py Project: yw774/neon
from collections import OrderedDict, defaultdict

import numpy as np

from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Constant, Gaussian
from neon.layers import Bias, GeneralizedCost, Linear
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared


def train_regressor(orig_wordvecs, w2v_W, w2v_vocab):
    """
    Return regressor to map word2vec to RNN word space

    Function modified from:
    https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
    """
    # Gather all words from word2vec that appear in wordvecs
    d = defaultdict(lambda: 0)
    for w in w2v_vocab.keys():
        d[w] = 1
    shared = OrderedDict()
    count = 0

    # drop the final two vocabulary entries (special tokens in the skip-thoughts setup)
    for w in list(orig_wordvecs.keys())[:-2]:
        if d[w] > 0:
            shared[w] = count
            count += 1

    # Get the vectors for all words in 'shared'
    w2v = np.zeros((len(shared), 300), dtype='float32')
    sg = np.zeros((len(shared), 620), dtype='float32')
    for w in shared.keys():
        w2v[shared[w]] = w2v_W[w2v_vocab[w]]
        sg[shared[w]] = orig_wordvecs[w]

    train_set = ArrayIterator(X=w2v, y=sg, make_onehot=False)

    layers = [
        Linear(nout=620, init=Gaussian(loc=0.0, scale=0.1)),
        Bias(init=Constant(0.0))
    ]
    clf = Model(layers=layers)

    # regression model is trained using default global batch size
    cost = GeneralizedCost(costfunc=SumSquared())
    opt = GradientDescentMomentum(0.1, 0.9, gradient_clip_value=5.0)
    callbacks = Callbacks(clf)

    clf.fit(train_set,
            num_epochs=20,
            optimizer=opt,
            cost=cost,
            callbacks=callbacks)
    return clf
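
A minimal usage sketch: the returned Model maps word2vec rows into the RNN word
space via get_outputs. Here embed_batch is a hypothetical float32 array of
word2vec vectors, not a name from the original:

clf = train_regressor(orig_wordvecs, w2v_W, w2v_vocab)
# embed_batch: hypothetical (n, 300) float32 array of word2vec rows to map
mapped = clf.get_outputs(ArrayIterator(X=embed_batch, make_onehot=False))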
Example #2
def mlp_layer(name, nout, activation=relu, batch_norm=False, bias=None):
    """
    Layer configuration for MLP generator/discriminator

    Arguments:
        name (string): Layer name
        nout (int): Number of output feature maps
        activation (object): Activation function (defaults to ReLU)
        batch_norm (bool): Enable batch normalization (defaults to False)
        bias (object): Bias initializer; a Bias layer is appended when not None
    """
    layers = []
    layers.append(Linear(nout=nout, init=init_w, bsum=batch_norm, name=name))
    if batch_norm:
        layers.append(BatchNorm(name=name + '_bnorm', **bn_prm))
    if bias is not None:
        layers.append(Bias(init=bias, name=name + '_bias'))
    layers.append(Activation(transform=activation, name=name + '_rectlin'))
    return layers
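
A minimal usage sketch: since mlp_layer returns a list, blocks chain by plain
concatenation (init_w and bn_prm are module-level globals in the original file,
assumed defined; the sizes and names here are illustrative):

# Tanh is from neon.transforms; layer sizes are illustrative only
gen_layers = (mlp_layer('gen1', nout=1200, batch_norm=True) +
              mlp_layer('gen2', nout=1200, batch_norm=True) +
              mlp_layer('gen_out', nout=784, activation=Tanh()))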
Example #3
                         lshape=(3, 256, 256))

# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)

# setup model layers

layers = [
    Conv((5, 5, 16), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Conv((5, 5, 32), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Conv((3, 3, 32), init=init_norm, activation=Rectlin()),
    Pooling(2),
    Affine(nout=100, init=init_norm, activation=Rectlin()),
    Linear(nout=4, init=init_norm)
]

model = Model(layers=layers)

# cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost = GeneralizedCost(costfunc=SumSquared())
# fit and validate
optimizer = RMSProp()

# configure callbacks
callbacks = Callbacks(model, eval_set=eval_set, eval_freq=1)

model.fit(train_set,
          cost=cost,
          optimizer=optimizer,
          num_epochs=num_epochs,  # epoch count was truncated in the original snippet
          callbacks=callbacks)
Example #4
from neon.callbacks.callbacks import Callbacks
import random
import atexit
import bot_params
import numpy as np
import replay_memory
import enemydetector1

try:
    offset_memory = replay_memory.load(bot_params.offset_data_path)
    print "offsets loaded"
except IOError:
    offset_memory = replay_memory.OffsetMemory()

init_norm = Gaussian(loc=-0.1, scale=0.1)
layers = [Linear(1, init=init_norm), Bias(init=init_norm)]

mlp = Model(layers=layers)

cost = GeneralizedCost(costfunc=SumSquared())
optimizer = GradientDescentMomentum(0.5, momentum_coef=0.9)
try:
    mlp.load_params(bot_params.aim_weights_path)
except IOError:
    print "can't load aiming weights"


def get_offset_manual(predictions):
    #enemy_pos = replay_memory.clean_values_toone(predictions)[0, 0]
    x = 0.
    c = 0
Example #5
            BatchNorm(),
            Dropout(keep=0.8),
            Conv((5, 5, 5, 8), **conv3),
            BatchNorm(),
            Dropout(keep=0.8),
            Pooling((2, 2, 2)),
            Affine(1024, init=init, activation=lrelu),
            BatchNorm(),
            Affine(1024, init=init, activation=lrelu),
            BatchNorm(),
            b2,
            Affine(nout=1, init=init, bias=init, activation=Logistic())
            ]  # real/fake
branch2 = [b2,
           Affine(nout=1, init=init, bias=init, activation=lrelu)]  # E primary
branch3 = [b1, Linear(1, init=Constant(val=1.0))]  # SUM ECAL

D_layers = Tree([branch1, branch2, branch3], name="Discriminator")  # branch weights kept equal for now (the default, alphas=(1., 1., 1.))
# generator using convolution layers
init_gen = Gaussian(scale=0.001)
relu = Rectlin(slope=0)  # relu for generator
pad1 = dict(pad_h=2, pad_w=2, pad_d=2)
str1 = dict(str_h=2, str_w=2, str_d=2)
conv1 = dict(init=init_gen, batch_norm=False, activation=lrelu, padding=pad1, strides=str1, bias=init_gen)
pad2 = dict(pad_h=2, pad_w=2, pad_d=2)
str2 = dict(str_h=2, str_w=2, str_d=2)
conv2 = dict(init=init_gen, batch_norm=False, activation=lrelu, padding=pad2, strides=str2, bias=init_gen)
pad3 = dict(pad_h=0, pad_w=0, pad_d=0)
str3 = dict(str_h=1, str_w=1, str_d=1)
conv3 = dict(init=init_gen, batch_norm=False, activation=Tanh(), padding=pad3, strides=str3, bias=init_gen)
bg = BranchNode("bg")
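
For reference, the branching pattern used above: a BranchNode marks where a
trunk splits, the first branch list contains the full trunk, and every extra
branch restates the node it hangs from. A minimal sketch of that pattern
(layer sizes here are illustrative, not from the original):

main = [Affine(nout=128, init=init_gen, activation=relu), bg,
        Affine(nout=1, init=init_gen, activation=Logistic())]
side = [bg, Linear(nout=1, init=init_gen)]
shared = Tree([main, side])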
Example #6
          Conv((3, 3, 32), init=init_norm, activation=Rectlin()),
          
          Conv((3, 3, 64), strides=2, padding=1, init=init_norm, activation=Rectlin()),
          Conv((3, 3, 64), init=init_norm, activation=Rectlin()),
          Conv((3, 3, 64), init=init_norm, activation=Rectlin()),
          
          Conv((3, 3, 128), strides=2, padding=1, init=init_norm, activation=Rectlin()),
          Conv((3, 3, 128), init=init_norm, activation=Rectlin()),
          Conv((3, 3, 128), init=init_norm, activation=Rectlin()),
          
          Conv((3, 3, 256), strides=2, padding=1, init=init_norm, activation=Rectlin()),
          Conv((3, 3, 256), init=init_norm, activation=Rectlin()),
          Conv((3, 3, 256), init=init_norm, activation=Rectlin()),
          Pooling((8, 8)),
          Dropout(0.5),
          Linear(nout=4, init=Gaussian(loc=0.0, scale=0.01))]


model = Model(layers=layers)

# cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost = GeneralizedCost(costfunc=SumSquared())
# fit and validate
optimizer = Adam(learning_rate=0.001)

# configure callbacks
callbacks = Callbacks(model, eval_set=eval_set)

model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=50, callbacks=callbacks)
y_test = model.get_outputs(test_set)
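
A minimal follow-up sketch for scoring the regression output, where y_true is
a hypothetical NumPy array holding the ground-truth 4-value targets for
test_set (not a name from the original):

mse = np.mean((y_test - y_true) ** 2)  # y_true: hypothetical ground truth
print("test MSE: {:.4f}".format(mse))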
Example #7
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Conv((3, 3, 512), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))
vgg_layers.append(Affine(nout=4096, init=GlorotUniform(), bias=Constant(0), activation=relu))
vgg_layers.append(Dropout(keep=0.5))

vgg_layers.append(Linear(nout=4, init=GlorotUniform()))

model = Model(layers=vgg_layers)

# cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost = GeneralizedCost(costfunc=SumSquared())
# fit and validate
optimizer = RMSProp()

# configure callbacks
callbacks = Callbacks(model, eval_set=eval_set)

model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=10, callbacks=callbacks)
y_test = model.get_outputs(test_set)

Example #8
import bot_params as params

import replay_memory as mem

be = gen_backend(backend='cpu', batch_size=params.batch_size)

init_uni = Uniform(low=-0.1, high=0.1)

bn = True

layers = [
    Conv((4, 4, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
    Conv((8, 8, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
    Affine(nout=1000, init=init_uni, activation=Rectlin(), batch_norm=bn),
    Affine(nout=1000, init=init_uni, activation=Rectlin(), batch_norm=bn),
    Linear(nout=160 * 120, init=init_uni)
]

model = Model(layers=layers)


def load():
    model.load_params(params.weigths_path)


def predict(input_img):
    # model.set_batch_size(1)
    x_new = np.zeros((params.batch_size, input_img.size), dtype=np.float16)
    x_new[0] = mem.prepare_image(input_img)
    inp = ArrayIterator(X=x_new,
                        y=None,
                        make_onehot=False)  # iterator tail truncated in the original snippet
    return model.get_outputs(inp)[0]
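
A minimal usage sketch, where frame is a hypothetical capture as a NumPy array
of whatever shape mem.prepare_image expects:

load()                    # restore the trained weights
offsets = predict(frame)  # frame: hypothetical input image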
Example #9
               'bias': Constant(0),
               'activation': relu}
               
vgg_layers = []

# set up 3x3 conv stacks with different number of filters
vgg_layers.append(Conv((7, 7, 32), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 64), **conv_params))
vgg_layers.append(Conv((3, 3, 128), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Conv((3, 3, 256), **conv_params))
vgg_layers.append(Pooling(2, strides=2))
vgg_layers.append(Dropout(keep=0.5))

vgg_layers.append(Linear(nout=4, init=Gaussian(loc=0.0, scale=0.01)))

model = Model(layers=vgg_layers)

# cost = GeneralizedCost(costfunc=CrossEntropyBinary())
cost = GeneralizedCost(costfunc=SumSquared())
# fit and validate
optimizer = Adam(learning_rate=0.001)

# configure callbacks
callbacks = Callbacks(model, eval_set=eval_set)

model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=100, callbacks=callbacks)
y_test = model.get_outputs(test_set)