Example #1
def construct_distributed_dense(input_tensor,
                                sizes,
                                acts='relu',
                                k_inits='he_uniform',
                                names=None):
    """"""

    # repeat options if singletons
    acts, k_inits, names = iter_or_rep(acts), iter_or_rep(
        k_inits), iter_or_rep(names)

    # list of tensors
    tensors = [input_tensor]

    # iterate over specified layers
    for s, act, k_init, name in zip(sizes, acts, k_inits, names):

        # define a dense layer that will be applied through time distributed
        d_layer = Dense(s, kernel_initializer=k_init)

        # append time distributed layer to list of ppm layers
        tdist_tensor = TimeDistributed(d_layer, name=name)(tensors[-1])
        tensors.extend([tdist_tensor, _apply_act(act, tdist_tensor)])

    return tensors
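
A minimal usage sketch (hypothetical shapes and sizes, assuming the same Keras imports as the function above): the stack is applied to a padded particle array and the last tensor is the activated output of the final layer.

from tensorflow.keras.layers import Input

# hypothetical input: a variable-length set of particles with 4 features each
phi_input = Input(shape=(None, 4))
phi_tensors = construct_distributed_dense(phi_input, sizes=[100, 100, 128],
                                          acts='relu', names=['phi0', 'phi1', 'phi2'])
latent = phi_tensors[-1]  # activated output of the last TimeDistributed Dense layer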
Example #2
    def _process_hps(self):
        """See [`ArchBase`](#archbase) for how to pass in hyperparameters as
        well as defaults common to all EnergyFlow neural network models.

        **Required DNN Hyperparameters**

        - **input_dim** : _int_
            - The number of inputs to the model.
        - **dense_sizes** : {_tuple_, _list_} of _int_
            - The number of nodes in the dense layers of the model.

        **Default DNN Hyperparameters**

        - **acts**=`'relu'` : {_tuple_, _list_} of _str_ or Keras activation
            - Activation function(s) for the dense layers. A single string or
            activation layer will apply the same activation to all dense layers.
            Keras advanced activation layers are also accepted, either as
            strings (which use the default arguments) or as Keras `Layer` 
            instances. If passing a single `Layer` instance, be aware that this
            layer will be used for all activations and may introduce weight 
            sharing (such as with `PReLU`); it is recommended in this case to 
            pass as many activations as there are layers in the model. See the
            [Keras activations docs](https://keras.io/activations/) for more 
            detail.
        - **k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_ or Keras 
        initializer
            - Kernel initializers for the dense layers. A single string 
            will apply the same initializer to all layers. See the
            [Keras initializer docs](https://keras.io/initializers/) for 
            more detail.
        - **dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the dense layers. A single float will
            apply the same dropout rate to all layers. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **l2_regs**=`0` : {_tuple_, _list_} of _float_
            - $L_2$-regularization strength for both the weights and biases
            of the dense layers. A single float will apply the same
            $L_2$-regularization to all layers.
        """

        # process generic NN hps
        super(DNN, self)._process_hps()

        # required hyperparameters
        self.input_dim = self._proc_arg('input_dim')
        self.dense_sizes = self._proc_arg('dense_sizes')

        # activations
        self.acts = iter_or_rep(self._proc_arg('acts', default='relu'))

        # initializations
        self.k_inits = iter_or_rep(
            self._proc_arg('k_inits', default='he_uniform'))

        # regularization
        self.dropouts = iter_or_rep(self._proc_arg('dropouts', default=0.))
        self.l2_regs = iter_or_rep(self._proc_arg('l2_regs', default=0.))

        self._verify_empty_hps()
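
For context, a hedged construction sketch (assuming the `energyflow.archs.DNN` class that defines this method; all other hyperparameters keep their defaults):

from energyflow.archs import DNN

# illustrative hyperparameter values matching the docstring above
dnn = DNN(input_dim=8, dense_sizes=(64, 64, 32),
          acts='relu', k_inits='he_uniform', dropouts=0.1, l2_regs=1e-4)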
Example #3
def construct_dense(input_tensor, sizes,
                    acts='relu', k_inits='he_uniform',
                    dropouts=0., l2_regs=0.,
                    names=None):
    """"""
    
    # repeat options if singletons
    acts, k_inits, names = iter_or_rep(acts), iter_or_rep(k_inits), iter_or_rep(names)
    dropouts, l2_regs = iter_or_rep(dropouts), iter_or_rep(l2_regs)

    # lists of layers and tensors
    layers, tensors = [], [input_tensor]

    # iterate to make specified layers
    z = zip(sizes, acts, k_inits, dropouts, l2_regs, names)
    for s, act, k_init, dropout, l2_reg, name in z:

        # get layers and append them to list
        kwargs = ({'kernel_regularizer': l2(l2_reg), 'bias_regularizer': l2(l2_reg)} 
                  if l2_reg > 0. else {})
        dense_layer = Dense(s, kernel_initializer=k_init, name=name, **kwargs)
        act_layer = _get_act_layer(act)
        layers.extend([dense_layer, act_layer])

        # get tensors and append them to list
        tensors.append(dense_layer(tensors[-1]))
        tensors.append(act_layer(tensors[-1]))

        # apply dropout if specified
        if dropout > 0.:
            dr_name = None if name is None else '{}_dropout'.format(name)
            layers.append(Dropout(dropout, name=dr_name))
            tensors.append(layers[-1](tensors[-1]))

    return layers, tensors[1:]
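
A short usage sketch (hypothetical sizes, assuming a Keras `Input` tensor): the function returns both the layer objects and the output tensors, with the last tensor serving as the output of the stack.

from tensorflow.keras.layers import Input

dense_input = Input(shape=(16,))
dense_layers, dense_tensors = construct_dense(dense_input, sizes=[64, 32],
                                              dropouts=0.1, l2_regs=1e-4,
                                              names=['d0', 'd1'])
output = dense_tensors[-1]  # dropout output of the final block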
Example #4
def make_dense_layers(sizes, input_layer=None, input_shape=None, activations='relu', dropouts=0., l2_regs=0., k_inits='he_uniform'):

    # process options
    activations, dropouts = iter_or_rep(activations), iter_or_rep(dropouts)
    l2_regs, k_inits = iter_or_rep(l2_regs), iter_or_rep(k_inits)

    if input_shape is not None:
        input_layer = Input(shape=input_shape)

    # a list to store the layers
    dense_layers = [input_layer]

    # iterate over specified dense layers
    z = zip(sizes, activations, k_inits, dropouts, l2_regs)
    for i,(s, act, k_init, dropout, l2_reg) in enumerate(z):

        # construct variable argument dict
        kwargs = {'kernel_initializer': k_init}
        if l2_reg > 0.:
            kwargs.update({'kernel_regularizer': l2(l2_reg), 'bias_regularizer': l2(l2_reg)})

        # a new dense layer
        new_layer = _apply_act(act, Dense(s, **kwargs)(dense_layers[-1]))

        # apply dropout (does nothing if dropout is zero)
        if dropout > 0.:
            new_layer = Dropout(dropout)(new_layer)

        # apply new layer to previous and append to list
        dense_layers.append(new_layer)

    return dense_layers
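
A usage sketch with `input_shape` given, so the function creates the Input layer itself (sizes and rates are illustrative); the returned list begins with the input tensor and ends with the output of the final dense/dropout block.

dense_stack = make_dense_layers([64, 32], input_shape=(16,),
                                activations='relu', dropouts=0.1, l2_regs=1e-4)
inputs, outputs = dense_stack[0], dense_stack[-1]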
Example #5
def construct_distributed_dense(input_tensor, sizes, acts='relu', k_inits='he_uniform', 
                                                                  names=None, l2_regs=0.):
    """"""

    # repeat options if singletons
    acts, k_inits, names = iter_or_rep(acts), iter_or_rep(k_inits), iter_or_rep(names)
    l2_regs = iter_or_rep(l2_regs)
    
    # list of tensors
    layers, tensors = [], [input_tensor]

    # iterate over specified layers
    for s, act, k_init, name, l2_reg in zip(sizes, acts, k_inits, names, l2_regs):
        
        # define a dense layer that will be applied through time distributed
        kwargs = {} 
        if l2_reg > 0.:
            kwargs.update({'kernel_regularizer': l2(l2_reg), 'bias_regularizer': l2(l2_reg)})
        d_layer = Dense(s, kernel_initializer=k_init, **kwargs)

        # get layers and append them to list
        tdist_layer = TimeDistributed(d_layer, name=name)
        act_layer = _get_act_layer(act)
        layers.extend([tdist_layer, act_layer])

        # get tensors and append them to list
        tensors.append(tdist_layer(tensors[-1]))
        tensors.append(act_layer(tensors[-1]))

    return layers, tensors
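
Usage sketch for this variant (hypothetical shapes and sizes): unlike Example #1, it also returns the layer objects and supports L2 regularization.

from tensorflow.keras.layers import Input

phi_input = Input(shape=(None, 4))
phi_layers, phi_tensors = construct_distributed_dense(phi_input, sizes=[100, 128],
                                                      l2_regs=1e-4, names=['phi0', 'phi1'])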
Example #6
    def process_hps(self):
        """See [`ArchBase`](#archbase) for how to pass in hyperparameters.

        **Required DNN Hyperparameters**

        - **input_dim** : _int_
            - The number of inputs to the model.
        - **dense_sizes** : {_tuple_, _list_} of _int_
            - The number of nodes in the dense layers of the model.

        **Default DNN Hyperparameters**

        - **acts**=`'relu'` : {_tuple_, _list_} of _str_
            - Activation function(s) for the dense layers. A single string
            will apply the same activation to all layers. See the
            [Keras activations docs](https://keras.io/activations/) for 
            more detail.
        - **k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_
            - Kernel initializers for the dense layers. A single string 
            will apply the same initializer to all layers. See the
            [Keras initializer docs](https://keras.io/initializers/) for 
            more detail.
        - **dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the dense layers. A single float will
            apply the same dropout rate to all layers. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **l2_regs**=`0` : {_tuple_, _list_} of _float_
            - $L_2$-regularization strength for both the weights and biases
            of the dense layers. A single float will apply the same
            $L_2$-regularization to all layers.
        """

        # process generic NN hps
        super(DNN, self).process_hps()

        # required hyperparameters
        self.input_dim = self.hps['input_dim']
        self.dense_sizes = self.hps['dense_sizes']

        # activations
        self.acts = iter_or_rep(self.hps.get('acts', 'relu'))

        # initializations
        self.k_inits = iter_or_rep(self.hps.get('k_inits', 'he_uniform'))

        # regularization
        self.dropouts = iter_or_rep(self.hps.get('dropouts', 0))
        self.l2_regs = iter_or_rep(self.hps.get('l2_regs', 0))
Example #7
def construct_dense(input_tensor,
                    sizes,
                    acts='relu',
                    k_inits='he_uniform',
                    dropouts=0.,
                    l2_regs=0.,
                    names=None):
    """"""

    # repeat options if singletons
    acts, k_inits = iter_or_rep(acts), iter_or_rep(k_inits)
    dropouts, l2_regs = iter_or_rep(dropouts), iter_or_rep(l2_regs)
    names = iter_or_rep(names)

    # list of tensors
    tensors = [input_tensor]

    # iterate to make specified layers
    z = zip(sizes, acts, k_inits, dropouts, l2_regs, names)
    for s, act, k_init, dropout, l2_reg, name in z:

        # make new dense layer
        kwargs = {}
        if l2_reg > 0.:
            kwargs.update({
                'kernel_regularizer': l2(l2_reg),
                'bias_regularizer': l2(l2_reg)
            })
        d_tensor = Dense(s, kernel_initializer=k_init, name=name,
                         **kwargs)(tensors[-1])
        tensors.extend([d_tensor, _apply_act(act, d_tensor)])

        # apply dropout if specified
        if dropout > 0.:
            dr_name = None if name is None else '{}_dropout'.format(name)
            tensors.append(Dropout(dropout, name=dr_name)(tensors[-1]))

    return tensors
Example #8
    def _process_hps(self):
        r"""See [`ArchBase`](#archbase) for how to pass in hyperparameters as
        well as hyperparameters common to all EnergyFlow neural network models.

        **Required EFN Hyperparameters**

        - **input_dim** : _int_
            - The number of features for each particle.
        - **Phi_sizes** (formerly `ppm_sizes`) : {_tuple_, _list_} of _int_
            - The sizes of the dense layers in the per-particle frontend
            module $\Phi$. The last element will be the number of latent 
            observables that the model defines.
        - **F_sizes** (formerly `dense_sizes`) : {_tuple_, _list_} of _int_
            - The sizes of the dense layers in the backend module $F$.

        **Default EFN Hyperparameters**

        - **Phi_acts**=`'relu'` (formerly `ppm_acts`) : {_tuple_, _list_} of
        _str_ or Keras activation
            - Activation function(s) for the dense layers in the
            per-particle frontend module $\Phi$. A single string or activation
            layer will apply the same activation to all layers. Keras advanced
            activation layers are also accepted, either as strings (which use
            the default arguments) or as Keras `Layer` instances. If passing a
            single `Layer` instance, be aware that this layer will be used for
            all activations and may introduce weight sharing (such as with 
            `PReLU`); it is recommended in this case to pass as many activations
            as there are layers in the model. See the [Keras activations 
            docs](https://keras.io/activations/) for more detail.
        - **F_acts**=`'relu'` (formerly `dense_acts`) : {_tuple_, _list_} of
        _str_ or Keras activation
            - Activation function(s) for the dense layers in the
            backend module $F$. A single string or activation layer will apply
            the same activation to all layers.
        - **Phi_k_inits**=`'he_uniform'` (formerly `ppm_k_inits`) : {_tuple_,
        _list_} of _str_ or Keras initializer
            - Kernel initializers for the dense layers in the per-particle
            frontend module $\Phi$. A single string will apply the same
            initializer to all layers. See the [Keras initializer
            docs](https://keras.io/initializers/) for more detail.
        - **F_k_inits**=`'he_uniform'` (formerly `dense_k_inits`) : {_tuple_,
        _list_} of _str_ or Keras initializer
            - Kernel initializers for the dense layers in the backend 
            module $F$. A single string will apply the same initializer 
            to all layers.
        - **latent_dropout**=`0` : _float_
            - Dropout rate for the summation layer that defines the
            value of the latent observables on the inputs. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **F_dropouts**=`0` (formerly `dense_dropouts`) : {_tuple_, _list_}
        of _float_
            - Dropout rates for the dense layers in the backend module $F$. 
            A single float will apply the same dropout rate to all dense layers.
        - **mask_val**=`0` : _float_
            - Particles with all features set equal to this value will be
            ignored. The [Keras Masking
            layer](https://keras.io/layers/core/#masking) appears to have
            issues masking the biases of a network, so this has been
            implemented in a custom (and correct) manner since version `0.12.0`.
        """

        # process generic NN hps
        super(SymmetricPerParticleNN, self)._process_hps()

        # required hyperparameters
        self.input_dim = self._proc_arg('input_dim')
        self.Phi_sizes = self._proc_arg('Phi_sizes', old='ppm_sizes')
        self.F_sizes = self._proc_arg('F_sizes', old='dense_sizes')

        # activations
        self.Phi_acts = iter_or_rep(
            self._proc_arg('Phi_acts', default='relu', old='ppm_acts'))
        self.F_acts = iter_or_rep(
            self._proc_arg('F_acts', default='relu', old='dense_acts'))

        # initializations
        self.Phi_k_inits = iter_or_rep(
            self._proc_arg('Phi_k_inits',
                           default='he_uniform',
                           old='ppm_k_inits'))
        self.F_k_inits = iter_or_rep(
            self._proc_arg('F_k_inits',
                           default='he_uniform',
                           old='dense_k_inits'))

        # regularizations
        #self.ppm_dropouts = iter_or_rep(self.hps.pop('ppm_dropouts', 0))
        self.latent_dropout = self._proc_arg('latent_dropout', default=0)
        self.F_dropouts = iter_or_rep(
            self._proc_arg('F_dropouts', default=0, old='dense_dropouts'))

        # masking
        self.mask_val = self._proc_arg('mask_val', default=0.)

        self._verify_empty_hps()
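
A hedged construction sketch (assuming the `energyflow.archs.EFN` subclass that uses this method; values are illustrative):

from energyflow.archs import EFN

efn = EFN(input_dim=2, Phi_sizes=(100, 100, 128), F_sizes=(100, 100, 100),
          Phi_acts='relu', F_acts='relu', latent_dropout=0.1, mask_val=0.)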
Example #9
    def process_hps(self):
        """See [`ArchBase`](#archbase) for how to pass in hyperparameters.

        **Required CNN Hyperparameters**

        - **input_shape** : {_tuple_, _list_} of _int_
            - The shape of a single jet image. Assuming that `data_format`
            is set to `channels_first`, this is `(nb_chan,npix,npix)`.
        - **filter_sizes** : {_tuple_, _list_} of _int_
            - The size of the filters, which are taken to be square, in each 
            convolutional layer of the network. The length of the list will be
            the number of convolutional layers in the network.
        - **num_filters** : {_tuple_, _list_} of _int_
            - The number of filters in each convolutional layer. The length of 
            `num_filters` must match that of `filter_sizes`.

        **Default CNN Hyperparameters**

        - **dense_sizes**=`None` : {_tuple_, _list_} of _int_
            - The sizes of the dense layer backend. A value of `None` is 
            equivalent to an empty list.
        - **pool_sizes**=`None` : {_tuple_, _list_} of _int_
            - Size of maxpooling filter, taken to be a square. A value of 
            `None` will not use maxpooling.
        - **conv_acts**=`'relu'` : {_tuple_, _list_} of _str_
            - Activation function(s) for the conv layers. A single string 
            will apply the same activation to all conv layers. See the
            [Keras activations docs](https://keras.io/activations/) for 
            more detail.
        - **dense_acts**=`'relu'` : {_tuple_, _list_} of _str_
            - Activation function(s) for the dense layers. A single string
            will apply the same activation to all dense layers.
        - **conv_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_
            - Kernel initializers for the convolutional layers. A single
            string will apply the same initializer to all layers. See the
            [Keras initializer docs](https://keras.io/initializers/) for 
            more detail.
        - **dense_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_
            - Kernel initializers for the dense layers. A single string will 
            apply the same initializer to all layers.
        - **conv_dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the convolutional layers. A single float will
            apply the same dropout rate to all conv layers. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **num_spatial2d_dropout**=`0` : _int_
            - The number of convolutional layers, starting from the beginning
            of the model, for which to apply
            [SpatialDropout2D](https://keras.io/layers/core/#spatialdropout2d)
            instead of Dropout.
        - **dense_dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the dense layers. A single float will apply 
            the same dropout rate to all dense layers.
        - **paddings**=`'valid'` : {_tuple_, _list_} of _str_
            - Controls how the filters are convolved with the inputs. See
            the [Keras Conv2D layer](https://keras.io/layers/convolutional/#conv2d) 
            for more detail.
        - **data_format**=`'channels_first'` : {`'channels_first'`, `'channels_last'`}
            - Sets which axis is expected to contain the different channels.
        """

        # process generic NN hps
        super(CNN, self).process_hps()

        # required hyperparameters
        transfer(self, self.hps,
                 ['input_shape', 'filter_sizes', 'num_filters'])

        # required checks
        m = 'filter_sizes and num_filters must be the same length'
        assert len(self.filter_sizes) == len(self.num_filters), m

        # optional (but likely provided) hyperparameters with defaults
        self.pool_sizes = iter_or_rep(self.hps.get('pool_sizes', None))
        self.dense_sizes = self.hps.get('dense_sizes', None)

        # activations
        self.conv_acts = iter_or_rep(self.hps.get('conv_acts', 'relu'))
        self.dense_acts = iter_or_rep(self.hps.get('dense_acts', 'relu'))

        # initializations
        self.conv_k_inits = iter_or_rep(
            self.hps.get('conv_k_inits', 'he_uniform'))
        self.dense_k_inits = iter_or_rep(
            self.hps.get('dense_k_inits', 'he_uniform'))

        # regularization
        self.conv_dropouts = iter_or_rep(self.hps.get('conv_dropouts', 0))
        self.num_spatial2d_dropout = self.hps.get('num_spatial2d_dropout', 0)
        self.dense_dropouts = iter_or_rep(self.hps.get('dense_dropouts', 0))

        # padding
        self.paddings = iter_or_rep(self.hps.get('padding', 'valid'))
        self.data_format = self.hps.get('data_format', 'channels_first')
Example #10
    def _process_hps(self):
        """See [`ArchBase`](#archbase) for how to pass in hyperparameters as
        well as hyperparameters common to all EnergyFlow neural network models.

        **Required CNN Hyperparameters**

        - **input_shape** : {_tuple_, _list_} of _int_
            - The shape of a single jet image. Assuming that `data_format`
            is set to `channels_first`, this is `(nb_chan,npix,npix)`.
        - **filter_sizes** : {_tuple_, _list_} of _int_
            - The size of the filters, which are taken to be square, in each 
            convolutional layer of the network. The length of the list will be
            the number of convolutional layers in the network.
        - **num_filters** : {_tuple_, _list_} of _int_
            - The number of filters in each convolutional layer. The length of 
            `num_filters` must match that of `filter_sizes`.

        **Default CNN Hyperparameters**

        - **dense_sizes**=`None` : {_tuple_, _list_} of _int_
            - The sizes of the dense layer backend. A value of `None` is 
            equivalent to an empty list.
        - **pool_sizes**=`0` : {_tuple_, _list_} of _int_
            - Size of maxpooling filter, taken to be a square. A value of 
            `0` will not use maxpooling.
        - **conv_acts**=`'relu'` : {_tuple_, _list_} of _str_  or Keras activation
            - Activation function(s) for the conv layers. A single string or
            activation layer will apply the same activation to all conv layers.
            Keras advanced activation layers are also accepted, either as
            strings (which use the default arguments) or as Keras `Layer` 
            instances. If passing a single `Layer` instance, be aware that this
            layer will be used for all activations and may introduce weight 
            sharing (such as with `PReLU`); it is recommended in this case to 
            pass as many activations as there are layers in the model. See the
            [Keras activations docs](https://keras.io/activations/) for more 
            detail.
        - **dense_acts**=`'relu'` : {_tuple_, _list_} of _str_ or Keras activation
            - Activation function(s) for the dense layers. A single string
            or activation layer will apply the same activation to all dense 
            layers.
        - **conv_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_ or Keras initializer
            - Kernel initializers for the convolutional layers. A single
            string will apply the same initializer to all layers. See the
            [Keras initializer docs](https://keras.io/initializers/) for 
            more detail.
        - **dense_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_ or Keras initializer
            - Kernel initializers for the dense layers. A single string will 
            apply the same initializer to all layers.
        - **conv_dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the convolutional layers. A single float will
            apply the same dropout rate to all conv layers. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **num_spatial2d_dropout**=`0` : _int_
            - The number of convolutional layers, starting from the beginning
            of the model, for which to apply
            [SpatialDropout2D](https://keras.io/layers/core/#spatialdropout2d)
            instead of Dropout.
        - **dense_dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the dense layers. A single float will apply 
            the same dropout rate to all dense layers.
        - **paddings**=`'valid'` : {_tuple_, _list_} of _str_
            - Controls how the filters are convolved with the inputs. See
            the [Keras Conv2D layer](https://keras.io/layers/convolutional/#conv2d) 
            for more detail.
        - **data_format**=`'channels_first'` : {`'channels_first'`, `'channels_last'`}
            - Sets which axis is expected to contain the different channels.
        """

        # process generic NN hps
        super(CNN, self)._process_hps()

        # required hyperparameters
        self.input_shape = self._proc_arg('input_shape')
        self.filter_sizes = self._proc_arg('filter_sizes')
        self.num_filters = self._proc_arg('num_filters')

        # required checks
        m = 'filter_sizes and num_filters must be the same length'
        assert len(self.filter_sizes) == len(self.num_filters), m

        # optional (but likely provided) hyperparameters with defaults
        self.pool_sizes = iter_or_rep(self._proc_arg('pool_sizes', default=0))
        self.dense_sizes = self._proc_arg('dense_sizes', default=None)
        if self.dense_sizes is None:
            self.dense_sizes = []

        # activations
        self.conv_acts = iter_or_rep(
            self._proc_arg('conv_acts', default='relu'))
        self.dense_acts = iter_or_rep(
            self._proc_arg('dense_acts', default='relu'))

        # initializations
        self.conv_k_inits = iter_or_rep(
            self._proc_arg('conv_k_inits', default='he_uniform'))
        self.dense_k_inits = iter_or_rep(
            self._proc_arg('dense_k_inits', default='he_uniform'))

        # regularization
        self.conv_dropouts = iter_or_rep(
            self._proc_arg('conv_dropouts', default=0.))
        self.num_spatial2d_dropout = self._proc_arg('num_spatial2d_dropout',
                                                    default=0)
        self.dense_dropouts = iter_or_rep(
            self._proc_arg('dense_dropouts', default=0.))

        # padding
        self.paddings = iter_or_rep(self._proc_arg('padding', default='valid'))
        self.data_format = self._proc_arg('data_format',
                                          default='channels_first')

        self._verify_empty_hps()
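
A hedged construction sketch (assuming the `energyflow.archs.CNN` class that defines this method; shapes and sizes are illustrative):

from energyflow.archs import CNN

cnn = CNN(input_shape=(1, 33, 33), filter_sizes=(8, 4, 4), num_filters=(8, 8, 8),
          dense_sizes=(50,), pool_sizes=2, conv_dropouts=0.1,
          data_format='channels_first')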
Example #11
    def process_hps(self):
        """See [`ArchBase`](#archbase) for how to pass in hyperparameters.

        **Required EFN Hyperparameters**

        - **input_dim** : _int_
            - The number of features for each particle.
        - **ppm_sizes** : {_tuple_, _list_} of _int_
            - The sizes of the dense layers in the per-particle frontend
            module $\\Phi$. The last element will be the number of latent 
            observables that the model defines.
        - **dense_sizes** : {_tuple_, _list_} of _int_
            - The sizes of the dense layers in the backend module $F$.

        **Default EFN Hyperparameters**

        - **ppm_acts**=`'relu'` : {_tuple_, _list_} of _str_
            - Activation function(s) for the dense layers in the
            per-particle frontend module $\\Phi$. A single string will apply 
            the same activation to all layers. See the [Keras activations 
            docs](https://keras.io/activations/) for more detail.
        - **dense_acts**=`'relu'` : {_tuple_, _list_} of _str_
            - Activation function(s) for the dense layers in the
            backend module $F$. A single string will apply the same activation
            to all layers.
        - **ppm_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_
            - Kernel initializers for the dense layers in the per-particle
            frontend module $\\Phi$. A single string will apply the same initializer 
            to all layers. See the [Keras initializer
            docs](https://keras.io/initializers/) for more detail.
        - **dense_k_inits**=`'he_uniform'` : {_tuple_, _list_} of _str_
            - Kernel initializers for the dense layers in the backend 
            module $F$. A single string will apply the same initializer 
            to all layers.
        - **latent_dropout**=`0` : _float_
            - Dropout rate for the summation layer that defines the
            value of the latent observables on the inputs. See the [Keras
            Dropout layer](https://keras.io/layers/core/#dropout) for more 
            detail.
        - **dense_dropouts**=`0` : {_tuple_, _list_} of _float_
            - Dropout rates for the dense layers in the backend module $F$. 
            A single float will apply the same dropout rate to all dense layers.
        - **mask_val**=`0` : _float_
            - Particles with all features set equal to this value will be
            ignored. See the [Keras Masking
            layer](https://keras.io/layers/core/#masking) for more detail.
        """

        # process generic NN hps
        super(SymmetricPerParticleNN, self).process_hps()

        # required hyperparameters
        self.input_dim = self.hps['input_dim']
        self.ppm_sizes = self.hps['ppm_sizes']
        self.dense_sizes = self.hps['dense_sizes']

        # activations
        self.ppm_acts = iter_or_rep(self.hps.get('ppm_acts', 'relu'))
        self.dense_acts = iter_or_rep(self.hps.get('dense_acts', 'relu'))

        # initializations
        self.ppm_k_inits = iter_or_rep(
            self.hps.get('ppm_k_inits', 'he_uniform'))
        self.dense_k_inits = iter_or_rep(
            self.hps.get('dense_k_inits', 'he_uniform'))

        # regularizations
        #self.ppm_dropouts = iter_or_rep(self.hps.get('ppm_dropouts', 0))
        self.latent_dropout = self.hps.get('latent_dropout', 0)
        self.dense_dropouts = iter_or_rep(self.hps.get('dense_dropouts', 0))

        # masking
        self.mask_val = self.hps.get('mask_val', 0.)