Example #1
def deconv_layer(name,
                 n_feature,
                 ker_size=4,
                 strides=2,
                 padding=1,
                 activation=lrelu,
                 batch_norm=True,
                 bias=None):
    """
    Layer configuration for deep-convolutional (DC) generator

    Arguments:
        name (string): Layer name
        n_feature (int): Number of output feature maps
        ker_size (int): Size of convolutional kernel (defaults to 4)
        strides (int): Stride of convolution (defaults to 2)
        padding (int): Padding of convolution (defaults to 1)
        activation (object): Activation function (defaults to leaky ReLu)
        batch_norm (bool): Enable batch normalization (defaults to True)
        bias (object): Bias initializer; a Bias layer is appended when
            not None (defaults to None)
    """
    layers = []
    layers.append(
        Deconvolution(fshape=(ker_size, ker_size, n_feature),
                      strides=strides,
                      padding=padding,
                      dilation={},
                      init=init_w,
                      bsum=batch_norm,
                      name=name))
    if batch_norm:
        layers.append(BatchNorm(name=name + '_bnorm', **bn_prm))
    if bias is not None:
        layers.append(Bias(init=bias, name=name + '_bias'))
    layers.append(Activation(transform=activation, name=name + '_rectlin'))
    return layers
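
A minimal usage sketch of the helper above: chaining deconv_layer blocks into a DCGAN-style generator stack. The feature-map counts and layer names are illustrative only, and it assumes the surrounding module defines lrelu, init_w and bn_prm as globals (as the function itself does):

from neon.layers import Sequential
from neon.transforms import Tanh

# hypothetical generator stack; sizes and names are illustrative
gen_layers = []
gen_layers += deconv_layer('gen1', n_feature=256)
gen_layers += deconv_layer('gen2', n_feature=128)
# output block: Tanh image output, no batch norm
gen_layers += deconv_layer('gen_out', n_feature=3, activation=Tanh(),
                           batch_norm=False)
generator = Sequential(gen_layers)
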
Example #2
File: util.py Project: yw774/neon
from collections import defaultdict, OrderedDict

import numpy as np

from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Constant, Gaussian
from neon.layers import Bias, GeneralizedCost, Linear
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared


def train_regressor(orig_wordvecs, w2v_W, w2v_vocab):
    """
    Return a regressor that maps 300-d word2vec embeddings into the 620-d RNN word space.

    Function modified from:
    https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
    """
    # Gather all words from word2vec that appear in wordvecs
    d = defaultdict(int)
    for w in w2v_vocab.keys():
        d[w] = 1
    shared = OrderedDict()
    count = 0

    # skip the last two entries (special tokens in the upstream skip-thoughts vocab)
    for w in list(orig_wordvecs.keys())[:-2]:
        if d[w] > 0:
            shared[w] = count
            count += 1

    # Get the vectors for all words in 'shared'
    w2v = np.zeros((len(shared), 300), dtype='float32')
    sg = np.zeros((len(shared), 620), dtype='float32')
    for w in shared.keys():
        w2v[shared[w]] = w2v_W[w2v_vocab[w]]
        sg[shared[w]] = orig_wordvecs[w]

    train_set = ArrayIterator(X=w2v, y=sg, make_onehot=False)

    layers = [
        Linear(nout=620, init=Gaussian(loc=0.0, scale=0.1)),
        Bias(init=Constant(0.0))
    ]
    clf = Model(layers=layers)

    # regression model is trained using default global batch size
    cost = GeneralizedCost(costfunc=SumSquared())
    opt = GradientDescentMomentum(0.1, 0.9, gradient_clip_value=5.0)
    callbacks = Callbacks(clf)

    clf.fit(train_set,
            num_epochs=20,
            optimizer=opt,
            cost=cost,
            callbacks=callbacks)
    return clf
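
A usage sketch for the function above, assuming a neon backend has been generated and that w2v_W / w2v_vocab come from a loaded word2vec model; Model.get_outputs then applies the trained regressor to new embeddings. The variable names are illustrative:

from neon.backends import gen_backend

be = gen_backend(backend='cpu', batch_size=128)

# orig_wordvecs: dict word -> 620-d RNN embedding
# w2v_W: word2vec matrix, w2v_vocab: dict word -> row index in w2v_W
clf = train_regressor(orig_wordvecs, w2v_W, w2v_vocab)

# map unseen word2vec rows into the 620-d RNN space
new_words = ArrayIterator(X=w2v_W[:1000], make_onehot=False)
mapped = clf.get_outputs(new_words)  # array of shape (1000, 620)
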
Example #3
def mlp_layer(name, nout, activation=relu, batch_norm=False, bias=None):
    """
    Layer configuration for MLP generator/discriminator

    Arguments:
        name (string): Layer name
        nout (int): Number of output units
        activation (object): Activation function (defaults to ReLu)
        batch_norm (bool): Enable batch normalization (defaults to False)
        bias (object): Bias initializer; a Bias layer is appended when
            not None (defaults to None)
    """
    layers = []
    layers.append(Linear(nout=nout, init=init_w, bsum=batch_norm, name=name))
    if batch_norm:
        layers.append(BatchNorm(name=name + '_bnorm', **bn_prm))
    if bias is not None:
        layers.append(Bias(init=bias, name=name + '_bias'))
    layers.append(Activation(transform=activation, name=name + '_rectlin'))
    return layers
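
As with the deconvolution helper, here is a sketch of how these blocks might be chained into an MLP discriminator. The layer sizes and the Logistic output are assumptions for illustration, and init_w / bn_prm are again assumed module-level globals:

from neon.transforms import Logistic

disc_layers = []
disc_layers += mlp_layer('dis1', nout=1024)
disc_layers += mlp_layer('dis2', nout=512, batch_norm=True)
# single sigmoid unit scoring real vs. generated samples
disc_layers += mlp_layer('dis_out', nout=1, activation=Logistic())
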
Example #4
from neon.callbacks.callbacks import Callbacks
from neon.initializers import Gaussian
from neon.layers import Bias, GeneralizedCost, Linear
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared
import random
import atexit
import bot_params
import numpy as np
import replay_memory
import enemydetector1

try:
    offset_memory = replay_memory.load(bot_params.offset_data_path)
    print "offsets loaded"
except IOError:
    offset_memory = replay_memory.OffsetMemory()

init_norm = Gaussian(loc=-0.1, scale=0.1)
# single-output linear regressor predicting the aim offset
layers = [Linear(1, init=init_norm), Bias(init=init_norm)]

mlp = Model(layers=layers)

cost = GeneralizedCost(costfunc=SumSquared())
optimizer = GradientDescentMomentum(0.5, momentum_coef=0.9)
try:
    mlp.load_params(bot_params.aim_weights_path)
except IOError:
    print "can't load aiming weights"


def get_offset_manual(predictions):
    #enemy_pos = replay_memory.clean_values_toone(predictions)[0, 0]
    x = 0.
    c = 0