Example 1
def create_layer(name):
    if name == 'aabh':
        return AdaptativeAssymetricBiHyperbolic()
    elif name == 'abh':
        return AdaptativeBiHyperbolic()
    elif name == 'ah':
        return AdaptativeHyperbolic()
    elif name == 'ahrelu':
        return AdaptativeHyperbolicReLU()
    elif name == 'srelu':
        return SReLU()
    elif name == 'prelu':
        return PReLU()
    elif name == 'lrelu':
        return LeakyReLU()
    elif name == 'trelu':
        return ThresholdedReLU()
    elif name == 'elu':
        return ELU()
    elif name == 'pelu':
        return PELU()
    elif name == 'psoftplus':
        return ParametricSoftplus()
    elif name == 'sigmoid':
        return Activation('sigmoid')
    elif name == 'relu':
        return Activation('relu')
    elif name == 'tanh':
        return Activation('tanh')
    elif name == 'softplus':
        return Activation('softplus')
    else:
        raise ValueError('Unknown activation layer name: {}'.format(name))
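
For context, a minimal usage sketch is shown below. It assumes the standard advanced activations (PReLU, LeakyReLU, ELU, ThresholdedReLU, SReLU, ParametricSoftplus) come from keras.layers.advanced_activations in Keras 1.x and that the adaptive/PELU variants are importable from a custom module; the import lines and layer sizes here are assumptions, not part of the original snippet.

# Sketch only: build a tiny model with the create_layer() factory above (Keras 1.x assumed).
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, input_dim=16))
model.add(create_layer('psoftplus'))   # parametric softplus activation
model.add(Dense(1))
model.add(create_layer('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')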
Example 2
    def _get_act_by_name(self, act):
        str_act = ['relu', 'tanh', 'sigmoid', 'linear', 'softmax', 'softplus',
                   'softsign', 'hard_sigmoid']
        if act in str_act:
            return Activation(act)
        else:
            return {'prelu': PReLU(), 'elu': ELU(), 'srelu': SReLU(), 'lrelu': LeakyReLU(),
                    'psoftplus': ParametricSoftplus(), 'trelu': ThresholdedReLU()}[act]
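
A brief usage note, as an illustrative sketch: string names fall through to a plain Activation layer, parametric names are looked up in the dict, and any other name raises a bare KeyError.

# Sketch only: assumes `obj` is an instance of the class that defines _get_act_by_name.
layer = obj._get_act_by_name('relu')       # -> Activation('relu')
layer = obj._get_act_by_name('psoftplus')  # -> ParametricSoftplus()
layer = obj._get_act_by_name('typo')       # -> raises KeyError: 'typo'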
Example 3
def nips_conv(num_cells):
    """Hard-coded model for NIPS"""
    layers = list()
    input_shape = (40, 50, 50)

    # injected noise strength
    sigma = 0.1

    # convolutional layer sizes
    convlayers = [(16, 15), (8, 9)]

    # l2_weight_regularization for every layer
    l2_weight = 1e-3

    # weight and activity regularization
    W_reg = [(0., l2_weight), (0., l2_weight)]
    act_reg = [(0., 0.), (0., 0.)]

    # loop over convolutional layers
    for (n, size), w_args, act_args in zip(convlayers, W_reg, act_reg):
        args = (n, size, size)
        kwargs = {
            'border_mode': 'valid',
            'subsample': (1, 1),
            'init': 'normal',
            'W_regularizer': l1l2(*w_args),
            'activity_regularizer': activity_l1l2(*act_args),
        }
        if len(layers) == 0:
            kwargs['input_shape'] = input_shape

        # add convolutional layer
        layers.append(Convolution2D(*args, **kwargs))

        # add gaussian noise
        layers.append(GaussianNoise(sigma))

        # add ReLU
        layers.append(Activation('relu'))

    # flatten
    layers.append(Flatten())

    # Add a final dense (affine) layer
    layers.append(Dense(num_cells, init='normal',
                        W_regularizer=l1l2(0., l2_weight),
                        activity_regularizer=activity_l1l2(1e-3, 0.)))

    # Finish it off with a parameterized softplus
    layers.append(ParametricSoftplus())

    return layers
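
The function returns a plain list of layers rather than a compiled model. A minimal assembly sketch follows, assuming Keras 1.x; the 'poisson' loss and Adam optimizer are illustrative choices, not part of the original snippet.

# Sketch only: stack the returned layers into a Sequential model.
from keras.models import Sequential

model = Sequential(nips_conv(num_cells=5))
model.compile(loss='poisson', optimizer='adam')
model.summary()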
Example 4
def ln(input_shape, nout, weight_init='glorot_normal', l2_reg=0.0):
    """A linear-nonlinear stack of layers

    Parameters
    ----------
    input_shape : tuple
        The shape of the stimulus (e.g. (40,50,50))

    nout : int
        Number of output cells

    weight_init : string, optional
        Keras weight initialization (default: 'glorot_normal')

    l2_reg : float, optional
        l2 regularization on the weights (default: 0.0)
    """
    layers = list()
    layers.append(Flatten(input_shape=input_shape))
    layers.append(Dense(nout, init=weight_init, W_regularizer=l2(l2_reg)))
    layers.append(ParametricSoftplus())
    return layers
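
As with the previous example, the layer list is assembled externally; a minimal sketch, again assuming Keras 1.x with an illustrative loss/optimizer choice:

# Sketch only: an LN model mapping a (40, 50, 50) stimulus to 5 output cells.
from keras.models import Sequential

model = Sequential(ln((40, 50, 50), nout=5, l2_reg=1e-3))
model.compile(loss='poisson', optimizer='adam')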
Example 5
def test_parametric_softplus():
    from keras.layers.advanced_activations import ParametricSoftplus
    np.random.seed(1337)
    inp = np.vstack((get_standard_values(), -get_standard_values()))
    # large values cause overflow in exp
    inp = inp[:-2]
    for alpha in [.5, -1., 1., 5]:
        for beta in [.5, -1., 1., 2]:
            layer = ParametricSoftplus(alpha_init=alpha,
                                       beta_init=beta,
                                       input_shape=inp.shape)
            layer.input = K.variable(inp)
            layer.build()
            for train in [True, False]:
                outp = K.eval(layer.get_output(train))
                assert_allclose(outp,
                                alpha * np.log(1. + np.exp(beta * inp)),
                                atol=1e-3)

            config = layer.get_config()
            assert config['alpha_init'] == alpha
            assert config['beta_init'] == beta
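
The quantity the assertion compares against is the parametric softplus itself, f(x) = alpha * log(1 + exp(beta * x)). A standalone NumPy version of that expression, for reference:

import numpy as np

def parametric_softplus(x, alpha, beta):
    """Reference expression checked by the test: alpha * log(1 + exp(beta * x))."""
    return alpha * np.log1p(np.exp(beta * x))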
Example 7
import pandas as pd

from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.layers.advanced_activations import ParametricSoftExponential, ParametricSoftplus
from keras.regularizers import l1, activity_l1, l1l2

M = 30000
N = 1
nb_epoch = 100
num_experiments = 5


results = []
for i in range(num_experiments):
    X = pd.DataFrame(pd.np.random.randn(M, 2))
    y = pd.DataFrame((X.T.values[:N] * X.T.values[N:])).sum()
    row = []
    for act in [ParametricSoftExponential(0.0), ParametricSoftplus(), Activation('relu'), Activation('linear')]:
        model = Sequential()
        model.add(Dense(2 * N, input_dim=2 * N, W_regularizer=l1l2(0.005)))
        model.add(act)
        model.add(Dense(1 * N, input_dim=2 * N, W_regularizer=l1l2(0.005)))
        model.add(act)
        model.add(Dense(1, input_dim=1 * N, W_regularizer=None))  # l1l2(0.003)))
        model.add(Activation('linear'))

        # model.add(Dense(2, input_dim=2, W_regularizer=l1l2(0.001)))
        # # model.add(ParametricSoftplus(.2))
        # model.add(ParametricSoftExponential(.9, output_dim=1))
        # model.add(Dense(1, input_dim=2, W_regularizer=l1l2(0.001)))
        # # model.add(ParametricSoftplus(.2))
        # model.add(ParametricSoftExponential(-.9))
        # model.add(Dense(1, input_dim=2, W_regularizer=l1l2(0.001)))
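
The snippet is truncated here: the inner loop never compiles or fits the model in the text shown. Purely as an assumption about how such an experiment might be finished (none of the following is in the original), one plausible ending for the loop body would be:

        # Hypothetical continuation -- not part of the original snippet.
        model.compile(loss='mse', optimizer=SGD(lr=0.01))
        model.fit(X.values, y.values, nb_epoch=nb_epoch, verbose=0)
        row.append(model.evaluate(X.values, y.values, verbose=0))
    results.append(row)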
Example 8
def convnet(input_shape, nout,
            num_filters=(8, 16), filter_size=(13, 13),
            weight_init='normal',
            l2_reg_weights=(0.0, 0.0, 0.0),
            l1_reg_weights=(0.0, 0.0, 0.0),
            l2_reg_activity=(0.0, 0.0, 0.0),
            l1_reg_activity=(0.0, 0.0, 0.0),
            dropout=(0.0, 0.0)):
    """Convolutional neural network

    Parameters
    ----------
    input_shape : tuple
        The shape of the stimulus (e.g. (40,50,50))

    nout : int
        Number of output cells

    num_filters : tuple, optional
        Number of filters in each layer. Default: (8, 16)

    filter_size : tuple, optional
        Convolutional filter size. Default: (13, 13)

    weight_init : string, optional
        Keras weight initialization (default: 'normal')

    l2_reg_weights : tuple of floats, optional
        l2 regularization on the weights of each layer (default: 0.0)

    l1_reg_weights : tuple of floats, optional
        l1 regularization on the weights of each layer (default: 0.0)

    l2_reg_activity : tuple of floats, optional
        l2 regularization on the activations of each layer (default: 0.0)

    l1_reg_activity : tuple of floats, optional
        l1 regularization on the activations of each layer (default: 0.0)

    dropout : tuple of floats, optional
        Fraction of units to 'dropout' for regularization (default: 0.0)
    """
    layers = list()

    def _regularize(layer_idx):
        """Small helper function to define per layer regularization"""
        return {
            'W_regularizer': l1l2(l1_reg_weights[layer_idx], l2_reg_weights[layer_idx]),
            'activity_regularizer': activity_l1l2(l1_reg_activity[layer_idx], l2_reg_activity[layer_idx]),
        }

    # first convolutional layer
    layers.append(Convolution2D(num_filters[0], filter_size[0], filter_size[1],
                                input_shape=input_shape, init=weight_init,
                                border_mode='valid', subsample=(1, 1),
                                **_regularize(0)))

    # Add relu activation
    layers.append(Activation('relu'))

    # max pooling layer
    layers.append(MaxPooling2D(pool_size=(2, 2)))

    # flatten
    layers.append(Flatten())

    # Dropout (first stage)
    layers.append(Dropout(dropout[0]))

    # Add dense (affine) layer
    layers.append(Dense(num_filters[1], init=weight_init, **_regularize(1)))

    # Add relu activation
    layers.append(Activation('relu'))

    # Dropout (second stage)
    layers.append(Dropout(dropout[1]))

    # Add a final dense (affine) layer
    layers.append(Dense(nout, init=weight_init, **_regularize(2)))

    # Finish it off with a parameterized softplus
    layers.append(ParametricSoftplus())

    return layers
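
A minimal assembly sketch for completeness, with the same caveats as the earlier examples (Keras 1.x assumed; regularization strengths, loss and optimizer below are illustrative values, not from the original):

# Sketch only: two-stage CNN from the builder above, ending in a ParametricSoftplus.
from keras.models import Sequential

layers = convnet(input_shape=(40, 50, 50), nout=5,
                 l2_reg_weights=(1e-3, 1e-3, 1e-3),
                 dropout=(0.1, 0.1))
model = Sequential(layers)
model.compile(loss='poisson', optimizer='adam')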