Example #1
    def __init__(self, input_length, input_dim, output_dim, context_dim, attention_hidden_dim,
                 name='AttentionLSTM', truncate_gradient=-1, go_backwards=False,
                 weight_init='glorot_uniform', inner_init='orthogonal', bias_init='zero', forget_bias_init='one',
                 activation='tanh', attention_activation='tanh', inner_activation='hard_sigmoid'):
        super(AttentionLSTM_X, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.context_dim = context_dim
        self.attention_hidden_dim = attention_hidden_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        # Declare which input combinations this layer accepts and which outputs it exposes.
        self.required_input_sets = [['input_single', 'context'], ['input_sequence', 'context'], ['input_sequence', 'input_mask', 'context']]
        self.output_names = ['output_last', 'output_sequence', 'output_sequence_with_alpha', 'output_last_with_alpha']
        self.required_function_sets = [['weight_init', 'inner_init', 'bias_init', 'forget_bias_init', 'activation', 'attention_activation']]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_output('output_last_with_alpha', self.output_last_with_alpha)
        self.set_output('output_sequence_with_alpha', self.output_sequence_with_alpha)
        # Resolve string identifiers into the actual activation/initialization callables.
        self.set_function('activation', activations.get(activation))
        self.set_function('attention_activation', activations.get(attention_activation))
        self.set_function('inner_activation', activations.get(inner_activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
        self.set_function('forget_bias_init', initializations.get(forget_bias_init))
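For orientation, a minimal, hypothetical instantiation of the constructor above; every dimension is a made-up illustration, and how the declared inputs ('input_sequence', 'context', 'input_mask') are wired up afterwards depends on the rest of the dlx graph API, which this example does not show:

# Hypothetical usage sketch; all dimensions below are arbitrary examples.
attn = AttentionLSTM_X(input_length=20,          # timesteps per sequence
                       input_dim=128,            # feature size of each timestep
                       output_dim=256,           # LSTM hidden/output size
                       context_dim=64,           # size of the attended context vectors
                       attention_hidden_dim=32)  # hidden size of the attention scorer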
Example #2
    def __init__(self,
                 input_length,
                 input_dim,
                 output_dim,
                 name='RNN',
                 truncate_gradient=-1,
                 go_backwards=False,
                 weight_init='glorot_uniform',
                 inner_init='orthogonal',
                 bias_init='zero',
                 activation='sigmoid'):
        super(RNN, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single'], ['input_sequence'],
                                    ['input_sequence', 'input_mask']]
        self.output_names = ['output_last', 'output_sequence']
        self.required_function_sets = [[
            'weight_init', 'inner_init', 'bias_init', 'activation'
        ]]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
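As a quick sketch, constructing this RNN needs only the three mandatory dimensions; everything else falls back to the defaults in the signature. The truncate_gradient/go_backwards pair matches the theano.scan parameters of the same names, which suggests -1 means untruncated backpropagation through time, though that is an inference rather than something these examples demonstrate:

# Hypothetical usage sketch; dimensions are illustrative.
rnn = RNN(input_length=10, input_dim=50, output_dim=100)
# Equivalent to passing activation='sigmoid', weight_init='glorot_uniform',
# inner_init='orthogonal', bias_init='zero', truncate_gradient=-1, go_backwards=False.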
Example #3
    def __init__(self,
                 input_length,
                 input_dim,
                 output_dim,
                 context_dim,
                 attention_hidden_dim,
                 name='AttentionLSTM',
                 truncate_gradient=-1,
                 go_backwards=False,
                 weight_init='glorot_uniform',
                 inner_init='orthogonal',
                 bias_init='zero',
                 forget_bias_init='one',
                 activation='tanh',
                 attention_activation='tanh',
                 inner_activation='hard_sigmoid'):
        super(AttentionLSTM_X, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.context_dim = context_dim
        self.attention_hidden_dim = attention_hidden_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single', 'context'],
                                    ['input_sequence', 'context'],
                                    [
                                        'input_sequence', 'input_mask',
                                        'context'
                                    ]]
        self.output_names = [
            'output_last', 'output_sequence', 'output_sequence_with_alpha',
            'output_last_with_alpha'
        ]
        self.required_function_sets = [[
            'weight_init', 'inner_init', 'bias_init', 'forget_bias_init',
            'activation', 'attention_activation'
        ]]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_output('output_last_with_alpha', self.output_last_with_alpha)
        self.set_output('output_sequence_with_alpha',
                        self.output_sequence_with_alpha)
        self.set_function('activation', activations.get(activation))
        self.set_function('attention_activation',
                          activations.get(attention_activation))
        self.set_function('inner_activation',
                          activations.get(inner_activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
        self.set_function('forget_bias_init',
                          initializations.get(forget_bias_init))
Example #4
File: core.py Project: lxastro/dlx
    def __init__(self, input_dim, output_dim, name='Dense', weight_init='glorot_uniform', bias_init='zero', activation='linear'):
        super(Dense, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name

        # Declare the accepted inputs and exposed outputs, then resolve function names to callables.
        self.required_input_sets = [['input']]
        self.output_names = ['output']
        self.required_function_sets = [['weight_init', 'bias_init', 'activation']]
        self.set_output('output', self.output)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('bias_init', initializations.get(bias_init))
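A hypothetical construction of this Dense layer. The last line assumes that set_function, which the constructor itself uses, is also meant to be called from outside; that is a guess about the API, not something these examples demonstrate:

# Hypothetical usage sketch.
dense = Dense(input_dim=100, output_dim=10)  # activation defaults to 'linear'
# Assuming set_function is public, the resolved activation could be swapped later:
dense.set_function('activation', activations.get('tanh'))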
Example #5
    def __init__(self, input_length, input_dim, output_dim, name='RNN', truncate_gradient=-1, go_backwards=False,
                 weight_init='glorot_uniform', inner_init='orthogonal', bias_init='zero', activation='sigmoid'):
        super(RNN, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single'], ['input_sequence'], ['input_sequence', 'input_mask']]
        self.output_names = ['output_last', 'output_sequence']
        self.required_function_sets = [['weight_init', 'inner_init', 'bias_init', 'activation']]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
Example #6
    def __init__(self,
                 input_dim,
                 output_dim,
                 name='Dense',
                 weight_init='glorot_uniform',
                 bias_init='zero',
                 activation='linear'):
        super(Dense, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name

        self.required_input_sets = [['input']]
        self.output_names = ['output']
        self.required_function_sets = [[
            'weight_init', 'bias_init', 'activation'
        ]]
        self.set_output('output', self.output)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('bias_init', initializations.get(bias_init))
Example #7
from dlx import initializations
import numpy as np

print '\n------------------------------------------------------------'
print 'Test: dlx.initializations'

def two(shape):
    return 2. * np.ones(shape)

init_dict = {'uniform': (2, 3, 4),
             'normal': (2, 3, 4),
             'lecun_uniform': [2, 3, 4],
             'glorot_normal': (2, 3, 4),
             'glorot_uniform': (2, 3, 4),
             'he_normal': [2, 3, 4],
             'he_uniform': (2, 3, 4),
             'orthogonal': (4, 4),
             'identity': (4, 4),
             'zero': (2, 3, 4),
             'one': (2, 3, 4),
             two: (2, 3, 4)}

for fun, shape in init_dict.items():
    val = initializations.get(fun)(shape)
    print fun, shape, val.dtype, ':'
    print val
    print
Example #8
from dlx import initializations
import numpy as np

print '\n------------------------------------------------------------'
print 'Test: dlx.initializations'


def two(shape):
    return 2. * np.ones(shape)


init_dict = {
    'uniform': (2, 3, 4),
    'normal': (2, 3, 4),
    'lecun_uniform': [2, 3, 4],
    'glorot_normal': (2, 3, 4),
    'glorot_uniform': (2, 3, 4),
    'he_normal': [2, 3, 4],
    'he_uniform': (2, 3, 4),
    'orthogonal': (4, 4),
    'identity': (4, 4),
    'zero': (2, 3, 4),
    'one': (2, 3, 4),
    two: (2, 3, 4)
}

for fun, shape in init_dict.items():
    val = initializations.get(fun)(shape)
    print fun, shape, val.dtype, ':'
    print val
    print
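Note the two entry in the dictionaries above: a plain callable sits alongside string keys, which implies initializations.get returns callables unchanged and only looks strings up in a registry. A minimal sketch of that dispatch pattern (get_initializer and registry are hypothetical names; this is not the actual dlx implementation):

def get_initializer(identifier, registry):
    # Callables, like the custom two() above, pass through untouched;
    # strings are resolved against a registry of built-in initializers.
    if callable(identifier):
        return identifier
    return registry[identifier]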