Example #1
File: nn.py Project: surban/mlutils
 def abel_sigm_abel_tf(x):
     # Nested helper from the 'abel_sigm_abel' branch of make_transfer_func
     # (see Example #2): apply abel_fracexpn_f with the layer's input
     # exponent, a sigmoid shifted by 0.05, then abel_fracexpn_f again with
     # the layer's output exponent.
     abel1 = abel_fracexpn_f(x, T.shape_padright(self.abel_in_n(lyr)))
     sigm = T.nnet.sigmoid(abel1) + 0.05
     abel2 = abel_fracexpn_f(sigm, T.shape_padright(self.abel_out_n(lyr)))
     return abel2
Example #2
File: nn.py Project: surban/mlutils
 def make_transfer_func(self, lyr, tf):
     """Return the transfer (activation) function `tf` for layer `lyr` as a callable."""
     if tf == 'tanh':
         return T.tanh
     elif tf == 'sigm':
         return T.nnet.sigmoid
     elif tf == 'softmax':
         return T.nnet.softmax
     elif tf == 'linear':
         return lambda x: x
     elif tf == 'relu':
         return T.nnet.relu
     elif tf == 'exp':
         return T.exp
     elif tf == 'log':
         return lambda x: T.log(T.maximum(0.0001, x))
     elif tf == 'sin':
         return T.sin
     elif tf == 'psi':
         from addiplication.nnet.abelpsi_f import abel_psi_f
         return abel_psi_f
     elif tf == 'psiinv':
         from addiplication.nnet.abelpsi_f import abel_psi_inv_f
         return abel_psi_inv_f
     elif tf == 'abel':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: abel_fracexpn_f(x, T.shape_padright(self.abel_n(lyr)))
     elif tf == 'relu_abel':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: abel_fracexpn_f(T.nnet.relu(x), T.shape_padright(self.abel_n(lyr)))
     elif tf == 'sigm_abel':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: abel_fracexpn_f(T.nnet.sigmoid(x), T.shape_padright(self.abel_n(lyr)))
     elif tf == 'abel_sigm':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: T.nnet.sigmoid(abel_fracexpn_f(x, T.shape_padright(self.abel_n(lyr))))
     elif tf == 'abel_tanh':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: T.tanh(abel_fracexpn_f(x, T.shape_padright(self.abel_n(lyr))))
     elif tf == 'tanh_abel':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         return lambda x: abel_fracexpn_f(T.tanh(x), T.shape_padright(self.abel_n(lyr)))
     elif tf == 'abel_sigm_abel':
         from addiplication.nnet.abelpsi_f import abel_fracexpn_f
         def abel_sigm_abel_tf(x):
             abel1 = abel_fracexpn_f(x, T.shape_padright(self.abel_in_n(lyr)))
             sigm = T.nnet.sigmoid(abel1) + 0.05
             abel2 = abel_fracexpn_f(sigm, T.shape_padright(self.abel_out_n(lyr)))
             return abel2
         return abel_sigm_abel_tf
     else:
         raise ValueError("unknown transfer function %s for layer %s" % (tf, str(lyr)))
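
The listing above only shows the name-to-callable dispatch itself. As a minimal, self-contained sketch of how such a returned callable is typically used with Theano, the snippet below reimplements just the stateless branches (none of the `abel_*` variants, which need the per-layer exponents stored on the network object) and compiles the result; everything except the Theano API is an illustrative name, not part of surban/mlutils.

import numpy as np
import theano
import theano.tensor as T

def make_transfer_func(tf):
    # Simplified stand-in for the dispatch above: only branches that do not
    # depend on per-layer state.
    if tf == 'tanh':
        return T.tanh
    elif tf == 'sigm':
        return T.nnet.sigmoid
    elif tf == 'log':
        return lambda x: T.log(T.maximum(0.0001, x))  # clamp to avoid log(0)
    else:
        raise ValueError("unknown transfer function %s" % tf)

x = T.matrix('x')                      # symbolic pre-activation
y = make_transfer_func('sigm')(x)      # build the symbolic activation
f = theano.function([x], y)            # compile to a NumPy-callable function
print(f(np.zeros((2, 2), dtype=theano.config.floatX)))  # sigmoid(0) == 0.5

Returning the Theano op directly where possible and wrapping anything that needs extra arguments in a lambda, as the original dispatch does, keeps every branch usable as a plain unary callable during graph construction.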