Code Example #1
File: attention.py Project: lxastro/dlx
    def __init__(self, input_length, input_dim, output_dim, context_dim, attention_hidden_dim,
                 name='AttentionLSTM', truncate_gradient=-1, go_backwards=False,
                 weight_init='glorot_uniform', inner_init='orthogonal', bias_init='zero', forget_bias_init='one',
                 activation='tanh', attention_activation='tanh', inner_activation='hard_sigmoid'):
        super(AttentionLSTM_X, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.context_dim = context_dim
        self.attention_hidden_dim = attention_hidden_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single', 'context'],
                                    ['input_sequence', 'context'],
                                    ['input_sequence', 'input_mask', 'context']]
        self.output_names = ['output_last', 'output_sequence',
                             'output_sequence_with_alpha', 'output_last_with_alpha']
        self.required_function_sets = [['weight_init', 'inner_init', 'bias_init',
                                        'forget_bias_init', 'activation', 'attention_activation']]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_output('output_last_with_alpha', self.output_last_with_alpha)
        self.set_output('output_sequence_with_alpha', self.output_sequence_with_alpha)
        self.set_function('activation', activations.get(activation))
        self.set_function('attention_activation', activations.get(attention_activation))
        self.set_function('inner_activation', activations.get(inner_activation))
        self.set_function('weight_init', initializations.get(weight_init))
        # the recurrent weights use their own initializer (inner_init)
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
        self.set_function('forget_bias_init', initializations.get(forget_bias_init))
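
The constructor above only registers names, outputs, and functions; the five positional dimension arguments are required, and everything else has a default. A minimal instantiation sketch follows; the sizes are illustrative and the import path is assumed from the attribution above, not confirmed by the listing:

from attention import AttentionLSTM_X  # import path assumed

layer = AttentionLSTM_X(input_length=20,          # steps per input sequence
                        input_dim=128,            # features per step
                        output_dim=256,           # LSTM hidden/output size
                        context_dim=64,           # size of the attended context vectors
                        attention_hidden_dim=32)  # hidden size of the attention scorer
# All remaining keyword arguments keep their defaults, e.g. inner_activation='hard_sigmoid'.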
Code Example #2
File: recurrent.py Project: lchmo444/dlx
    def __init__(self,
                 input_length,
                 input_dim,
                 output_dim,
                 name='RNN',
                 truncate_gradient=-1,
                 go_backwards=False,
                 weight_init='glorot_uniform',
                 inner_init='orthogonal',
                 bias_init='zero',
                 activation='sigmoid'):
        super(RNN, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single'], ['input_sequence'],
                                    ['input_sequence', 'input_mask']]
        self.output_names = ['output_last', 'output_sequence']
        self.required_function_sets = [[
            'weight_init', 'inner_init', 'bias_init', 'activation'
        ]]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        # the recurrent weights use their own initializer (inner_init)
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
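
The plain RNN needs only the three shape arguments. A minimal sketch under the same assumptions as before (illustrative sizes, import path taken from the attribution):

from recurrent import RNN  # import path assumed

rnn = RNN(input_length=20, input_dim=128, output_dim=64)  # illustrative sizes
# Defaults: forward iteration, untruncated gradients (truncate_gradient=-1), sigmoid units.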
Code Example #3
File: core.py Project: lxastro/dlx
    def __init__(self, activation):
        super(Activation, self).__init__()

        self.required_input_sets = [['input']]
        self.output_names = ['output']
        self.required_function_sets = [['activation']]
        self.set_output('output', self.output)
        self.set_function('activation', activations.get(activation))
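
Since the constructor takes a single argument, usage is one line. 'relu' is one of the names exercised by the test in Code Example #6; judging by how activations.get is used there, a plain callable should also be accepted:

from core import Activation  # import path assumed

act = Activation('relu')  # any name resolvable by activations.get, or a callable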
Code Example #4
File: core.py Project: lxastro/dlx
    def __init__(self, input_dim, output_dim, name='Dense', weight_init='glorot_uniform', bias_init='zero', activation='linear'):
        super(Dense, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name

        self.required_input_sets = [['input']]
        self.output_names = ['output']
        self.required_function_sets = [['weight_init', 'bias_init', 'activation']]
        self.set_output('output', self.output)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        self.set_function('bias_init', initializations.get(bias_init))
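
A corresponding instantiation sketch for the dense layer (sizes illustrative, import path assumed from the attribution):

from core import Dense  # import path assumed

dense = Dense(input_dim=128, output_dim=10, activation='tanh')  # illustrative sizes, non-default activation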
Code Example #5
File: recurrent.py Project: lxastro/dlx
    def __init__(self, input_length, input_dim, output_dim, name='RNN', truncate_gradient=-1, go_backwards=False,
                 weight_init='glorot_uniform', inner_init='orthogonal', bias_init='zero', activation='sigmoid'):
        super(RNN, self).__init__()
        self.input_length = input_length
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.name = name
        self.truncate_gradient = truncate_gradient
        self.go_backwards = go_backwards

        self.required_input_sets = [['input_single'], ['input_sequence'], ['input_sequence', 'input_mask']]
        self.output_names = ['output_last', 'output_sequence']
        self.required_function_sets = [['weight_init', 'inner_init', 'bias_init', 'activation']]
        self.set_output('output_last', self.output_last)
        self.set_output('output_sequence', self.output_sequence)
        self.set_function('activation', activations.get(activation))
        self.set_function('weight_init', initializations.get(weight_init))
        # the recurrent weights use their own initializer (inner_init)
        self.set_function('inner_init', initializations.get(inner_init))
        self.set_function('bias_init', initializations.get(bias_init))
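
This is the same constructor as in Code Example #2; the extra keyword arguments control reversed iteration and truncated backpropagation through time. A sketch of a non-default configuration (sizes illustrative; the -1 default presumably mirrors theano.scan's "no truncation" convention, given the Theano usage in Code Example #6):

rnn_bwd = RNN(input_length=20, input_dim=128, output_dim=64,
              go_backwards=True,     # iterate the sequence back to front
              truncate_gradient=10)  # backpropagate through at most 10 steps (assumed scan semantics)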
Code Example #6
File: activations.py Project: lxastro/dlx
import numpy as np
import theano
import theano.tensor as T
from dlx import activations  # import path assumed from the project attribution above

# data2/data3 are defined earlier in the original test file; illustrative
# 2-D and 3-D inputs are assumed here so the snippet runs standalone.
data2 = np.random.random((3, 4)).astype(theano.config.floatX)
data3 = np.random.random((2, 3, 4)).astype(theano.config.floatX)


def add_one(x):
    return x + 1.0

init_list = [('softmax', data2),
             ('softplus', data2),
             ('relu', data2),
             ('tanh', data2),
             ('sigmoid', data2),
             ('hard_sigmoid', data2),
             ('linear', data2),
             (add_one, data2),
             ('softmax', data3),
             ('softplus', data3),
             ('relu', data3),
             ('tanh', data3),
             ('sigmoid', data3),
             ('hard_sigmoid', data3),
             ('linear', data3),
             (add_one, data3)]

for (act, data) in init_list:
    # build a symbolic input whose rank matches the test data
    if data.ndim == 2:
        x = T.matrix('x')
    else:
        x = T.tensor3('x')
    # resolve the activation (by name or callable), compile, and evaluate
    fun = theano.function([x], activations.get(act)(x))
    val = fun(data)
    print(act, 'data' + str(data.ndim), ':')
    print(val)
    print()
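
The loop above relies on activations.get accepting either a name or a callable (add_one). A small sketch of that dispatch contract, assuming get looks strings up in a registry and passes callables through unchanged:

relu_fn = activations.get('relu')   # a string resolves to the named activation
same_fn = activations.get(add_one)  # a callable is used as-is, as in init_list above
assert same_fn(1.0) == 2.0          # holds under the pass-through assumption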