Example #1
    def __init__(self, name, state_names, hidden_size=100, input_type="sequence", output_type="sequence",
                 inner_init=None, outer_init=None,
                 gate_activation='sigmoid', activation='tanh',
                 steps=None, backward=False, mask=None,
                 additional_input_dims=None):
        super(RecurrentLayer, self).__init__(name)
        self.state_names = state_names
        self.main_state = state_names[0]
        self.hidden_size = hidden_size
        self._gate_activation = gate_activation
        self._activation = activation
        self.gate_activate = build_activation(self._gate_activation)
        self.activate = build_activation(self._activation)
        self._input_type = input_type
        self._output_type = output_type
        self.inner_init = inner_init if inner_init else OrthogonalInitializer()
        self.outer_init = outer_init if outer_init else XavierGlorotInitializer()
        self._steps = steps
        self._mask = mask.tensor if isinstance(mask, NeuralVariable) else mask
        self._go_backwards = backward
        self.additional_input_dims = additional_input_dims if additional_input_dims else []

        if input_type not in INPUT_TYPES:
            raise Exception("Input type of {} is wrong: {}".format(name, input_type))
        if output_type not in OUTPUT_TYPES:
            raise Exception("Output type of {} is wrong: {}".format(name, output_type))
    def setup(self):
        self.output_dim = self.input_dim
        self._act = build_activation(self.activation)
        self.W_hl = self.create_weight(self.input_dim, self.projection_dim, "hl", initializer=self.init)
        self.W_tl = self.create_weight(self.input_dim, self.projection_dim, "tl", initializer=self.init)
        self.W_hr = self.create_weight(self.projection_dim, self.input_dim, "hr", initializer=self.init)
        self.W_tr = self.create_weight(self.projection_dim, self.input_dim, "tr", initializer=self.init)
        self.B_h = self.create_bias(self.input_dim, "h")
        self.B_t = self.create_bias(self.input_dim, "t", value=self.gate_bias)
        
        self.S_h = self.create_vector(self.input_dim, "S_h")
        self.S_t = self.create_vector(self.input_dim, "S_t")
        self.S_h.set_value(np.ones(self.input_dim, dtype=FLOATX))
        self.S_t.set_value(np.ones(self.input_dim, dtype=FLOATX))

        self.register_parameters(self.W_hl, self.B_h, self.W_tl, self.B_t, self.W_hr, self.W_tr, self.S_h, self.S_t)

        self.Mean_hl = self.create_vector(self.projection_dim, "Mean_hl")
        self.Mean_tl = self.create_vector(self.projection_dim, "Mean_tl")
        self.Mean_hr = self.create_vector(self.input_dim, "Mean_hr")
        self.Mean_tr = self.create_vector(self.input_dim, "Mean_tr")
        self.Std_hl = self.create_vector(self.projection_dim, "Std_hl")
        self.Std_tl = self.create_vector(self.projection_dim, "Std_tl")
        self.Std_hr = self.create_vector(self.input_dim, "Std_hr")
        self.Std_tr = self.create_vector(self.input_dim, "Std_tr")
        self.Std_hl.set_value(np.ones(self.projection_dim, dtype=FLOATX))
        self.Std_tl.set_value(np.ones(self.projection_dim, dtype=FLOATX))
        self.Std_hr.set_value(np.ones(self.input_dim, dtype=FLOATX))
        self.Std_tr.set_value(np.ones(self.input_dim, dtype=FLOATX))
        
        self.register_free_parameters(self.Mean_hl, self.Mean_tl, self.Mean_hr, self.Mean_tr, self.Std_hl, self.Std_tl, self.Std_hr, self.Std_tr)
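The parameter layout above (down/up projections W_*l and W_*r, Mean_*/Std_* statistics, S_* scales, and a gate bias on B_t) suggests a projected highway transform with normalized pre-activations. The following NumPy sketch is only a guess at the forward pass these weights could support; the function and dict keys are hypothetical, not deepy's actual code:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def projected_highway_forward(x, p, eps=1e-6, act=np.tanh):
        # x: (batch, input_dim); p: dict of the arrays created in setup().
        # Transform path H(x): project down, whiten, project up, whiten.
        hl = (x @ p["W_hl"] - p["Mean_hl"]) / (p["Std_hl"] + eps)
        hr = (hl @ p["W_hr"] - p["Mean_hr"]) / (p["Std_hr"] + eps)
        h = act(p["S_h"] * hr + p["B_h"])
        # Carry gate T(x): same structure, squashed into (0, 1).
        tl = (x @ p["W_tl"] - p["Mean_tl"]) / (p["Std_tl"] + eps)
        tr = (tl @ p["W_tr"] - p["Mean_tr"]) / (p["Std_tr"] + eps)
        t = sigmoid(p["S_t"] * tr + p["B_t"])
        # Highway mix: carry the input through where the gate stays closed.
        return h * t + x * (1.0 - t)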
Example #3
    def prepare(self):
        self.output_dim = self.input_dim
        self._act = build_activation(self.activation)
        self.W_h = self.create_weight(self.input_dim, self.input_dim, "h", initializer=self.init)
        self.W_t = self.create_weight(self.input_dim, self.input_dim, "t", initializer=self.init)
        self.B_h = self.create_bias(self.input_dim, "h")
        self.B_t = self.create_bias(self.input_dim, "t", value=self.gate_bias)

        self.register_parameters(self.W_h, self.B_h, self.W_t, self.B_t)
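W_h/W_t paired with a gate bias (value=self.gate_bias) is the standard highway-layer parameterization. A minimal sketch of the forward pass these weights imply, assuming the usual formulation y = H(x) * T(x) + x * (1 - T(x)); names here are illustrative, not the project's:

    import numpy as np

    def highway_forward(x, W_h, B_h, W_t, B_t, act=np.tanh):
        h = act(x @ W_h + B_h)                       # transform path H(x)
        t = 1.0 / (1.0 + np.exp(-(x @ W_t + B_t)))   # carry gate T(x), sigmoid
        return h * t + x * (1.0 - t)                 # identity where t ~ 0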
Example #4
    def setup(self):
        self.output_dim = self.input_dim
        self._act = build_activation(self.activation)
        self.W_hl = self.create_weight(self.input_dim,
                                       self.projection_dim,
                                       "hl",
                                       initializer=self.init)
        self.W_tl = self.create_weight(self.input_dim,
                                       self.projection_dim,
                                       "tl",
                                       initializer=self.init)
        self.W_hr = self.create_weight(self.projection_dim,
                                       self.input_dim,
                                       "hr",
                                       initializer=self.init)
        self.W_tr = self.create_weight(self.projection_dim,
                                       self.input_dim,
                                       "tr",
                                       initializer=self.init)
        self.B_h = self.create_bias(self.input_dim, "h")
        self.B_t = self.create_bias(self.input_dim, "t", value=self.gate_bias)
        self.D_h = self.create_vector(self.input_dim, "D_h")
        self.D_t = self.create_vector(self.input_dim, "D_t")
        self.D_h.set_value(
            np.ones(self.input_dim, dtype=FLOATX) * self.diag_init_val)
        self.D_t.set_value(
            np.ones(self.input_dim, dtype=FLOATX) * self.diag_init_val)

        self.S_h = self.create_vector(self.input_dim, "S_h")
        self.S_t = self.create_vector(self.input_dim, "S_t")
        self.S_h.set_value(np.ones(self.input_dim, dtype=FLOATX))
        self.S_t.set_value(np.ones(self.input_dim, dtype=FLOATX))

        self.register_parameters(self.W_hl, self.B_h, self.W_tl, self.B_t,
                                 self.W_hr, self.W_tr, self.D_h, self.D_t,
                                 self.S_h, self.S_t)

        self.Mean_hl = self.create_vector(self.projection_dim, "Mean_hl")
        self.Mean_tl = self.create_vector(self.projection_dim, "Mean_tl")
        self.Mean_hr = self.create_vector(self.input_dim, "Mean_hr")
        self.Mean_tr = self.create_vector(self.input_dim, "Mean_tr")
        self.Std_hl = self.create_vector(self.projection_dim, "Std_hl")
        self.Std_tl = self.create_vector(self.projection_dim, "Std_tl")
        self.Std_hr = self.create_vector(self.input_dim, "Std_hr")
        self.Std_tr = self.create_vector(self.input_dim, "Std_tr")
        self.Std_hl.set_value(np.ones(self.projection_dim, dtype=FLOATX))
        self.Std_tl.set_value(np.ones(self.projection_dim, dtype=FLOATX))
        self.Std_hr.set_value(np.ones(self.input_dim, dtype=FLOATX))
        self.Std_tr.set_value(np.ones(self.input_dim, dtype=FLOATX))

        self.register_free_parameters(self.Mean_hl, self.Mean_tl, self.Mean_hr,
                                      self.Mean_tr, self.Std_hl, self.Std_tl,
                                      self.Std_hr, self.Std_tr)

        if self.quasi_ortho_init:
            self.setup_quasi_ortho_init()
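Compared with Example #2, this variant additionally creates trainable diagonal vectors D_h and D_t (filled with diag_init_val) and can finish with setup_quasi_ortho_init(); the D_* vectors presumably contribute a learned elementwise term to each path's pre-activation, though the snippet does not show the forward pass.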
Example #5
File: recurrent.py  Project: yochju/deepy
    def __init__(self,
                 name,
                 state_names,
                 hidden_size=100,
                 input_type="sequence",
                 output_type="sequence",
                 inner_init=None,
                 outer_init=None,
                 gate_activation='sigmoid',
                 activation='tanh',
                 steps=None,
                 backward=False,
                 mask=None,
                 additional_input_dims=None):
        super(RecurrentLayer, self).__init__(name)
        self.state_names = state_names
        self.main_state = state_names[0]
        self.hidden_size = hidden_size
        self._gate_activation = gate_activation
        self._activation = activation
        self.gate_activate = build_activation(self._gate_activation)
        self.activate = build_activation(self._activation)
        self._input_type = input_type
        self._output_type = output_type
        self.inner_init = inner_init if inner_init else OrthogonalInitializer()
        self.outer_init = outer_init if outer_init else XavierGlorotInitializer()
        self._steps = steps
        self._mask = mask.tensor if isinstance(mask, NeuralVariable) else mask
        self._go_backwards = backward
        self.additional_input_dims = additional_input_dims if additional_input_dims else []

        if input_type not in INPUT_TYPES:
            raise Exception("Input type of {} is wrong: {}".format(
                name, input_type))
        if output_type not in OUTPUT_TYPES:
            raise Exception("Output type of {} is wrong: {}".format(
                name, output_type))
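The signature makes the construction contract explicit. A hypothetical instantiation, using only parameters visible above (argument values are made up):

    layer = RecurrentLayer(
        "encoder_rnn",
        state_names=["state"],       # state_names[0] becomes main_state
        hidden_size=256,
        gate_activation="sigmoid",   # compiled through build_activation
        activation="tanh",
        backward=True,               # sets _go_backwards for a reversed scan
    )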
Example #6
    def prepare(self):
        self.output_dim = self.input_dim
        self._act = build_activation(self.activation)
        self.W_h = self.create_weight(self.input_dim,
                                      self.input_dim,
                                      "h",
                                      initializer=self.init)
        self.W_t = self.create_weight(self.input_dim,
                                      self.input_dim,
                                      "t",
                                      initializer=self.init)
        self.B_h = self.create_bias(self.input_dim, "h")
        self.B_t = self.create_bias(self.input_dim, "t", value=self.gate_bias)

        self.register_parameters(self.W_h, self.B_h, self.W_t, self.B_t)
Example #7
File: lstm.py  Project: JunjieHu/deepy
    def _setup_functions(self):
        self._inner_act = build_activation(self._inner_activation)
        self._outer_act = build_activation(self._outer_activation)
Example #8
    def _setup_functions(self):
        self._activation = build_activation(self.activation)
Example #9
    def _setup_functions(self):
        self._assistive_params = []
        self._activation_func = build_activation(self.activation)
        self._softmax_func = build_activation("softmax")
Example #10
    def __init__(self, activation_type):
        super(Activation, self).__init__(activation_type)
        self._activation = build_activation(activation_type)
Example #11
File: activation.py  Project: 52nlp/deepy
    def __init__(self, activation_type):
        super(Activation, self).__init__(activation_type)
        self._activation = build_activation(activation_type)
Example #12
File: layer.py  Project: bluetit/deepy
    def activation(self, name):
        return build_activation(name)
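Every snippet on this page funnels through build_activation, which maps a name string to a callable. deepy's real implementation returns Theano expressions, but the pattern itself is a simple registry; a self-contained NumPy stand-in (hypothetical, not the library's code) might look like:

    import numpy as np

    def build_activation(name):
        def softmax(x):
            e = np.exp(x - x.max(axis=-1, keepdims=True))  # stabilized
            return e / e.sum(axis=-1, keepdims=True)
        registry = {
            "linear": lambda x: x,
            "tanh": np.tanh,
            "sigmoid": lambda x: 1.0 / (1.0 + np.exp(-x)),
            "relu": lambda x: np.maximum(0.0, x),
            "softmax": softmax,
        }
        try:
            return registry[name]
        except KeyError:
            raise ValueError("Unknown activation: {}".format(name))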
Example #13
File: conv.py  Project: zuxfoucault/deepy
    def _setup_functions(self):
        self._activation_func = build_activation(self.activation)
Example #14
File: layer.py  Project: yochju/deepy
    def activation(self, name):
        return build_activation(name)
Example #15
    def _setup_functions(self):
        self._relu = build_activation("relu")
        self._tanh = build_activation("tanh")
        self._softmax = build_activation("softmax")
        self.output_func = self._output_func()
Example #16
    def _setup_functions(self):
        self._hidden_act = build_activation(self._hidden_activation)
Example #17
    def _setup_functions(self):
        self._relu = build_activation("relu")
        self._tanh = build_activation("tanh")
        self._softmax = build_activation("softmax")
        self.output_func = self._output_func()
Example #18
    def _setup_functions(self):
        self._hidden_act = build_activation(self._hidden_activation)

Example #19
    def _setup_functions(self):
        self._assistive_params = []
        self._activation_func = build_activation(self.activation)
        self._softmax_func = build_activation('softmax')
Example #20
File: gru.py  Project: longdt219/deepy
    def _setup_functions(self):
        self._inner_act = build_activation(self._inner_activation)
        self._outer_act = build_activation(self._outer_activation)
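Examples #7 and #20 build two activations because gated recurrent layers use them in different places: the inner (gate) activation squashes the update and reset gates, while the outer activation produces the candidate state. A rough single-step GRU sketch under those assumptions (NumPy; parameter names are hypothetical):

    import numpy as np

    def gru_step(x, h_prev, p,
                 inner_act=lambda z: 1.0 / (1.0 + np.exp(-z)),  # gates
                 outer_act=np.tanh):                            # candidate
        z = inner_act(x @ p["W_z"] + h_prev @ p["U_z"] + p["b_z"])  # update gate
        r = inner_act(x @ p["W_r"] + h_prev @ p["U_r"] + p["b_r"])  # reset gate
        c = outer_act(x @ p["W_c"] + (r * h_prev) @ p["U_c"] + p["b_c"])
        return (1.0 - z) * h_prev + z * c   # blend old state with candidate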