Ejemplo n.º 1
0
def _activate(x, activation, input_size, verbose=2, **kwargs):
    """
    This function is used to produce activations for the outputs of any type of layer.

    Args:

        x: input tensor.
        activation: Refer to the ``add_layer`` method. Either a plain string
            (``'relu'``, ``'abs'``, ``'sigmoid'``, ``'tanh'``, ``'softmax'``,
            ``'squared'``, ``None``) or a tuple carrying extra parameters:
            ``('maxout', type, maxout_size)``, ``('relu', leak)``,
            ``('softmax', temperature)``.
        input_size: supply the size of the inputs.
        verbose: typical toolbox verbose
        dimension: used only for maxout. Give the dimension on which to maxout.

    Returns:

        tuple: ``(out, out_shp)``

    Raises:

        ValueError: if ``activation`` is not one of the recognized options.
            (Previously an unknown activation crashed with an unrelated
            ``NameError`` because ``out`` was never assigned.)
    """
    if verbose >= 3:
        print("... Setting up activations")

    # some activations like maxouts are supplied with special support parameters
    if isinstance(activation, tuple):
        kind = activation[0]
        if kind == 'maxout':
            # ('maxout', maxout_type, maxout_size); maxout changes the output
            # shape, so Maxout returns it alongside the tensor.
            out, out_shp = activations.Maxout(x=x,
                                              maxout_size=activation[2],
                                              input_size=input_size,
                                              type=activation[1],
                                              dimension=kwargs["dimension"])
        elif kind == 'relu':
            # ('relu', leak): leaky rectifier with slope `leak` for x < 0.
            out = activations.ReLU(x=x, alpha=activation[1])
            out_shp = input_size
        elif kind == 'softmax':
            # ('softmax', temperature): temperature-scaled softmax.
            out = activations.Softmax(x=x, temp=activation[1])
            out_shp = input_size
        else:
            raise ValueError("unknown tuple activation: " + str(kind))
    else:
        if activation == 'relu':
            out = activations.ReLU(x=x)
        elif activation == 'abs':
            out = activations.Abs(x=x)
        elif activation == 'sigmoid':
            out = activations.Sigmoid(x=x)
        elif activation == 'tanh':
            out = activations.Tanh(x=x)
        elif activation == 'softmax':
            out = activations.Softmax(x=x)
        elif activation == 'squared':
            out = activations.Squared(x=x)
        elif activation is None:
            # explicit pass-through: no nonlinearity applied.
            out = x
        else:
            raise ValueError("unknown activation: " + str(activation))
        out_shp = input_size

    if verbose >= 3:
        print("... Activations are setup")

    return (out, out_shp)
Ejemplo n.º 2
0
 def test_tanh(self):
     """Tanh activation must reproduce the precomputed reference matrix."""
     # Every row of the fixture input maps to the same tanh values, so the
     # expected result is one reference row tiled five times.
     reference_row = [-0.76159416, 0.96402758, -0.99505475, 0.9993293, 0.9999092]
     expected = np.array([reference_row] * 5)
     graph = A.Tanh(self.theano_input)
     actual = graph.eval({self.theano_input: self.numpy_input})
     self.assertEqual(actual.shape, expected.shape)
     self.assertTrue(np.allclose(actual, expected))