Example #1
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            constraint_rnn: Optional[str] = None,
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 2,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionMCDecoder,
              self).__init__(name="selfattention-mc",
                             input_size=encoding_size(encoding=encoding,
                                                      emb_dim=emb_dim),
                             mc_prop_est=mc_prop_est,
                             emb_dim=emb_dim,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
        decoder_layer = TransformerDecoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation,
            constraint=constraint_rnn,
            normalisation="layernorm")
        self.transformer_decoder = TransformerDecoderNetwork(
            decoder_layer=decoder_layer, num_layers=layers_rnn)
        self.mlp = MLP(units=units_mlp,
                       activations=activation_mlp,
                       constraint=constraint_mlp,
                       dropout_rates=dropout_mlp,
                       input_shape=self.encoding_size,
                       activation_final=activation_final_mlp)
        self.n_heads = n_heads
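
The constructor call below is purely illustrative (none of the values are defaults or settings from the library); it only shows how a decoder with the signature above might be instantiated.

# Illustrative instantiation only -- all argument values are made up.
decoder = SelfAttentionMCDecoder(
    units_mlp=[16, 16, 1],
    n_heads=2,
    units_rnn=32,
    layers_rnn=2,
    dropout_rnn=0.1,
    encoding="temporal_with_labels",
    emb_dim=8,
    marks=5)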
Example #2
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            num_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionCmDecoder,
              self).__init__(name="selfattention-cm",
                             input_size=encoding_size(encoding=encoding,
                                                      emb_dim=emb_dim),
                             mc_prop_est=mc_prop_est,
                             emb_dim=emb_dim,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
        self.mlp = MLP(units=units_mlp,
                       activations=activation_mlp,
                       constraint=constraint_mlp,
                       dropout_rates=dropout_mlp,
                       input_shape=self.encoding_size,
                       activation_final=activation_final_mlp)

        self.mlp1 = MLP(units=[1],
                        activations=None,
                        constraint="nonneg",
                        dropout_rates=None,
                        input_shape=1,
                        activation_final=None,
                        use_bias=False)
Example #3
    def __init__(self,
                 name: str,
                 input_size: Optional[int] = None,
                 emb_dim: Optional[int] = 1,
                 embedding_constraint: Optional[str] = None,
                 temporal_scaling: Optional[float] = 1.,
                 encoding: Optional[str] = "times_only",
                 time_encoding: Optional[str] = "relative",
                 marks: Optional[int] = 1,
                 **kwargs):
        super(VariableHistoryDecoder, self).__init__(name=name,
                                                     input_size=input_size,
                                                     marks=marks,
                                                     **kwargs)
        self.emb_dim = emb_dim
        self.encoding = encoding
        self.time_encoding = time_encoding
        self.embedding_constraint = embedding_constraint
        self.encoding_size = encoding_size(encoding=self.encoding,
                                           emb_dim=self.emb_dim)

        self.embedding = None
        if encoding in [
                "marks_only", "concatenate", "temporal_with_labels",
                "learnable_with_labels"
        ]:
            self.embedding = MLP(units=[self.emb_dim],
                                 activations=None,
                                 constraint=self.embedding_constraint,
                                 dropout_rates=0,
                                 input_shape=self.marks,
                                 activation_final=None,
                                 use_bias=False)

        self.temporal_enc = None
        if encoding in ["temporal", "temporal_with_labels"]:
            self.temporal_enc = SinusoidalEncoding(emb_dim=self.emb_dim,
                                                   scaling=temporal_scaling)
        elif encoding in ["learnable", "learnable_with_labels"]:
            self.temporal_enc = MLP(units=[self.emb_dim],
                                    activations=None,
                                    constraint=self.embedding_constraint,
                                    dropout_rates=0,
                                    input_shape=1,
                                    activation_final=None)
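
All of the constructors in these examples derive their input width from encoding_size(encoding=..., emb_dim=...). Its implementation is not shown on this page; the sketch below is only a plausible reconstruction inferred from the encoding branches of VariableHistoryDecoder above (raw times contribute one feature, each embedding contributes emb_dim), and the library's actual mapping may differ.

# Plausible sketch only -- inferred from the encoding branches above,
# not the library's actual implementation.
def encoding_size(encoding: str, emb_dim: int) -> int:
    if encoding == "times_only":
        return 1                      # raw (inter-)event times only
    if encoding in ("marks_only", "temporal", "learnable"):
        return emb_dim                # a single embedding of width emb_dim
    if encoding == "concatenate":
        return emb_dim + 1            # mark embedding concatenated with the time
    if encoding in ("temporal_with_labels", "learnable_with_labels"):
        return 2 * emb_dim            # time encoding plus mark embedding (assumed)
    raise ValueError("Unknown encoding: {}".format(encoding))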
Example #4
    def __init__(
            self,
            # RNN args
            units_rnn: int,
            layers_rnn: int,
            dropout_rnn: float,
            # MLP args
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        gru = nn.GRU(
            # This equals self.encoding_size, which is only set later by the
            # parent constructor.
            input_size=encoding_size(encoding=encoding, emb_dim=emb_dim),
            hidden_size=units_rnn,
            batch_first=True,
            bidirectional=False,
            dropout=dropout_rnn,
            num_layers=layers_rnn)
        super(GRUEncoder, self).__init__(
            name="gru",
            rnn=gru,
            units_mlp=units_mlp,
            activation=activation_mlp,
            dropout_mlp=dropout_mlp,
            constraint=constraint_mlp,
            activation_final_mlp=activation_final_mlp,
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
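
Note that units_rnn, layers_rnn and dropout_rnn have no defaults in this encoder, so they must always be supplied. An illustrative call (values are made up, not taken from the library):

# Illustrative only -- made-up values.
encoder = GRUEncoder(
    units_rnn=32,        # GRU hidden size
    layers_rnn=1,        # nn.GRU applies dropout only between stacked layers
    dropout_rnn=0.,
    units_mlp=[32, 1],
    encoding="times_only",
    marks=3)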
Example #5
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = "nonneg",
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            model_log_cm: Optional[bool] = False,
            do_zero_subtraction: Optional[bool] = True,
            emb_dim: Optional[int] = 2,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):

        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"

        enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        input_size = units_mlp[0] - enc_size
        super(MLPCmDecoder,
              self).__init__(name="mlp-cm",
                             do_zero_subtraction=do_zero_subtraction,
                             model_log_cm=model_log_cm,
                             input_size=input_size,
                             emb_dim=emb_dim,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=units_mlp[0],
            activation_final=activation_final_mlp)
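
As the inline comment notes, the first entry of units_mlp doubles as the MLP's input width, so the remaining input_size handed to the base class (presumably the width of the encoder's history representation) is recovered by subtracting the encoding size. A small illustration with made-up numbers:

# Made-up numbers, for illustration only.
emb_dim = 2
enc_size = encoding_size(encoding="times_only", emb_dim=emb_dim)
history_size = 30                             # assumed width of the encoder output
units_mlp = [history_size + enc_size, 16, 1]
# Inside MLPCmDecoder.__init__ this then gives:
input_size = units_mlp[0] - enc_size          # == history_size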
Example #6
    def __init__(
            self,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(IdentityEncoder,
              self).__init__(name="identity",
                             output_size=encoding_size(encoding=encoding,
                                                       emb_dim=emb_dim),
                             emb_dim=emb_dim,
                             embedding_constraint=embedding_constraint,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
Example #7
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 2,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        input_size = units_mlp[0] - enc_size
        if len(units_mlp) < 2:
            raise ValueError("Units of length at least 2 need to be specified")
        super(MLPMCDecoder, self).__init__(
            name="mlp-mc",
            input_size=input_size,
            mc_prop_est=mc_prop_est,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=units_mlp[0],
            activation_final=activation_final_mlp)
Example #8
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            constraint_rnn: Optional[str] = None,
            # Other params
            do_zero_subtraction: Optional[bool] = True,
            model_log_cm: Optional[bool] = False,
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):

        if constraint_rnn is None:
            print("Warning! SA decoder is unconstrained. Setting to `nonneg`")
            constraint_rnn = "nonneg"
        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"

        super(SelfAttentionCmDecoder,
              self).__init__(name="selfattention-cm",
                             do_zero_subtraction=do_zero_subtraction,
                             model_log_cm=model_log_cm,
                             input_size=encoding_size(encoding=encoding,
                                                      emb_dim=emb_dim),
                             mc_prop_est=mc_prop_est,
                             emb_dim=emb_dim,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             marks=marks,
                             **kwargs)
        decoder_layer = TransformerDecoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation,
            constraint=constraint_rnn,
            normalisation="layernorm_with_running_stats")
        self.transformer_decoder = TransformerDecoderNetwork(
            decoder_layer=decoder_layer, num_layers=layers_rnn)
        self.mlp = MLP(units=units_mlp,
                       activations=activation_mlp,
                       constraint=constraint_mlp,
                       dropout_rates=dropout_mlp,
                       input_shape=self.encoding_size,
                       activation_final=activation_final_mlp)
        self.n_heads = n_heads