def model(action_axes):
    """Build a two-layer feed-forward network whose output is laid out on *action_axes*.

    The hidden layer has a fixed width of 10 tanh units; the output layer's
    width is determined by the supplied axes (presumably one unit per action —
    confirm against the caller).
    """
    hidden_layer = neon.Affine(
        nout=10,
        weight_init=neon.GlorotInit(),
        bias_init=neon.ConstantInit(),
        activation=neon.Tanh(),
    )
    output_layer = neon.Affine(
        weight_init=neon.GlorotInit(),
        bias_init=neon.ConstantInit(),
        activation=neon.Tanh(),
        axes=(action_axes,),
    )
    return neon.Sequential([hidden_layer, output_layer])
def __init__(self, activation_function=None, name="middleware_embedder"):
    """Initialize the middleware embedder's configuration.

    :param activation_function: neon activation applied by this embedder.
        Defaults to a fresh ``neon.Rectlin()`` when omitted. (Previously the
        default was a single ``Rectlin`` instance created at import time and
        shared by every embedder — the classic mutable-default pitfall.)
    :param name: identifier for this embedder.
    """
    self.name = name
    # Wired up later by the network builder; unset until then.
    self.input = None
    self.output = None
    self.weights_init = neon.GlorotInit()
    self.biases_init = neon.ConstantInit()
    # None sentinel so each instance gets its own activation object.
    if activation_function is None:
        activation_function = neon.Rectlin()
    self.activation_function = activation_function
def __init__(self, input_size, batch_size=None, activation_function=None, name="embedder"):
    """Initialize the input embedder's configuration.

    :param input_size: size of the observation this embedder consumes.
    :param batch_size: batch size, or None if not yet known.
    :param activation_function: neon activation applied by this embedder.
        Defaults to a fresh ``neon.Rectlin()`` when omitted. (Previously the
        default was a single ``Rectlin`` instance created at import time and
        shared by every embedder — the classic mutable-default pitfall.)
    :param name: identifier for this embedder.
    """
    self.name = name
    self.input_size = input_size
    self.batch_size = batch_size
    # None sentinel so each instance gets its own activation object.
    if activation_function is None:
        activation_function = neon.Rectlin()
    self.activation_function = activation_function
    self.weights_init = neon.GlorotInit()
    self.biases_init = neon.ConstantInit()
    # Wired up later by the network builder; unset until then.
    self.input = None
    self.output = None
def __init__(self, tuning_parameters, head_idx=0, loss_weight=1., is_local=True):
    """Initialize a network head's configuration.

    :param tuning_parameters: experiment parameters; only ``batch_size`` is read here.
    :param head_idx: index of this head among the network's heads.
    :param loss_weight: scalar or list of weights applied to this head's loss terms;
        normalized to a list via ``force_list``.
    :param is_local: whether this head belongs to the local (vs. global) network.
    """
    # Identity and placement.
    self.name = "head"
    self.head_idx = head_idx
    self.is_local = is_local
    self.batch_size = tuning_parameters.batch_size

    # Loss configuration; loss_weight is normalized to a list.
    self.loss_weight = force_list(loss_weight)
    self.loss = []
    self.loss_type = []
    self.regularizations = []

    # Graph endpoints, filled in when the head is built.
    self.input = []
    self.output = []
    self.target = []

    # Parameter initializers.
    self.weights_init = neon.GlorotInit()
    self.biases_init = neon.ConstantInit()