def test_maxout_2d(self):
    out, out_shp = A.Maxout(self.input_zeros,
                            self.maxout_size,
                            self.input_size,
                            type='maxout',
                            dimension=2)
    self.assertEqual(out_shp, self.input_size)
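
# As a rough reference for what the test above exercises (a sketch, not the
# toolbox's implementation): maxout splits the units along the chosen
# dimension into groups of `maxout_size` and keeps each group's maximum, so
# that dimension shrinks by a factor of `maxout_size`. The consecutive
# grouping below is an assumption; the library may group differently.
import numpy as np

def _maxout_reference(x, maxout_size, dimension=1):
    """Illustrative NumPy maxout: max over consecutive groups along `dimension`."""
    x = np.moveaxis(x, dimension, -1)                      # bring target axis last
    grouped = x.reshape(x.shape[:-1] + (-1, maxout_size))  # split axis into groups
    return np.moveaxis(grouped.max(axis=-1), -1, dimension)
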
def _activate(x, activation, input_size, verbose=2, **kwargs):
    """
    This function is used to produce activations for the outputs of any type
    of layer.

    Args:
        x: input tensor.
        activation: Refer to the ``add_layer`` method.
        input_size: supply the size of the inputs.
        verbose: typical toolbox verbose levels.
        dimension: used only for maxout. Give the dimension on which to
                   maxout (passed through ``**kwargs``).

    Returns:
        tuple: ``(out, out_shp)``
    """
    if verbose >= 3:
        print("... Setting up activations")

    # some activations like maxouts are supplied with special support parameters
    if isinstance(activation, tuple):
        if activation[0] == 'maxout':
            maxout_size = activation[2]
            maxout_type = activation[1]
            out, out_shp = activations.Maxout(x=x,
                                              maxout_size=maxout_size,
                                              input_size=input_size,
                                              type=maxout_type,
                                              dimension=kwargs["dimension"])
        elif activation[0] == 'relu':
            relu_leak = activation[1]
            out = activations.ReLU(x=x, alpha=relu_leak)
            out_shp = input_size
        elif activation[0] == 'softmax':
            temperature = activation[1]
            out = activations.Softmax(x=x, temp=temperature)
            out_shp = input_size
    else:
        if activation == 'relu':
            out = activations.ReLU(x=x)
        elif activation == 'abs':
            out = activations.Abs(x=x)
        elif activation == 'sigmoid':
            out = activations.Sigmoid(x=x)
        elif activation == 'tanh':
            out = activations.Tanh(x=x)
        elif activation == 'softmax':
            out = activations.Softmax(x=x)
        elif activation == 'squared':
            out = activations.Squared(x=x)
        elif activation is None:
            out = x
        out_shp = input_size

    if verbose >= 3:
        print("... Activations are setup")

    return (out, out_shp)
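
# A hedged usage sketch for _activate, kept as comments since it depends on
# the surrounding toolbox. `x` is assumed to be a Theano symbolic tensor and
# the sizes are placeholders, not values from this file. Tuple activations
# carry their own parameters: ('maxout', <maxout type>, <maxout size>) also
# needs a `dimension` keyword, ('relu', <leak>) and ('softmax', <temperature>)
# do not, and plain string activations such as 'tanh' take nothing extra.
#
#   import theano.tensor as T
#   x = T.matrix('x')
#   out, out_shp = _activate(x=x, activation=('maxout', 'maxout', 2),
#                            input_size=800, verbose=3, dimension=1)
#   out, out_shp = _activate(x=x, activation='tanh', input_size=800)
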
def test_meanout(self):
    out, out_shp = A.Maxout(self.input_zeros,
                            self.maxout_size,
                            self.input_size_min,
                            type='meanout',
                            dimension=1)
    self.assertTrue(np.allclose(out, self.input_zeros))
    self.assertEqual(out_shp, self.input_size_min)
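
# 'meanout' averages each group instead of taking its maximum, which is why
# the all-zero input in the test above comes back unchanged. A sketch under
# the same (assumed) consecutive-grouping convention as _maxout_reference:
import numpy as np

def _meanout_reference(x, maxout_size, dimension=1):
    """Illustrative NumPy meanout: mean over consecutive groups along `dimension`."""
    x = np.moveaxis(x, dimension, -1)                      # bring target axis last
    grouped = x.reshape(x.shape[:-1] + (-1, maxout_size))  # split axis into groups
    return np.moveaxis(grouped.mean(axis=-1), -1, dimension)

# An all-zero batch stays all-zero after meanout:
assert np.allclose(_meanout_reference(np.zeros((4, 8)), maxout_size=2), 0)
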