Example #1
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            num_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionCmDecoder,
              self).__init__(name="selfattention-cm",
                             input_size=encoding_size(encoding=encoding,
                                                      emb_dim=emb_dim),
                             mc_prop_est=mc_prop_est,
                             emb_dim=emb_dim,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
        self.mlp = MLP(units=units_mlp,
                       activations=activation_mlp,
                       constraint=constraint_mlp,
                       dropout_rates=dropout_mlp,
                       input_shape=self.encoding_size,
                       activation_final=activation_final_mlp)

        self.mlp1 = MLP(units=[1],
                        activations=None,
                        constraint="nonneg",
                        dropout_rates=None,
                        input_shape=1,
                        activation_final=None,
                        use_bias=False)
Example #2
 def __init__(
         self,
         # MLP args
         units_mlp: List[int],
         activation_mlp: Optional[str] = "relu",
         dropout_mlp: Optional[float] = 0.,
         constraint_mlp: Optional[str] = None,
         activation_final_mlp: Optional[str] = None,
         # Other args
         emb_dim: Optional[int] = 1,
         embedding_constraint: Optional[str] = None,
         temporal_scaling: Optional[float] = 1.,
         encoding: Optional[str] = "times_only",
         time_encoding: Optional[str] = "relative",
         marks: Optional[int] = 1,
         **kwargs):
     super(MLPVariableEncoder, self).__init__(
         name="mlp-variable",
         output_size=units_mlp[-1],
         emb_dim=emb_dim,
         embedding_constraint=embedding_constraint,
         temporal_scaling=temporal_scaling,
         encoding=encoding,
         time_encoding=time_encoding,
         marks=marks,
         **kwargs)
     self.mlp = MLP(
         units=units_mlp,
         activations=activation_mlp,
         constraint=constraint_mlp,
         dropout_rates=dropout_mlp,
         input_shape=self.encoding_size,
         activation_final=activation_final_mlp)
Example #3
    def __init__(self,
                 name: str,
                 input_size: Optional[int] = None,
                 emb_dim: Optional[int] = 1,
                 embedding_constraint: Optional[str] = None,
                 temporal_scaling: Optional[float] = 1.,
                 encoding: Optional[str] = "times_only",
                 time_encoding: Optional[str] = "relative",
                 marks: Optional[int] = 1,
                 **kwargs):
        super(VariableHistoryDecoder, self).__init__(name=name,
                                                     input_size=input_size,
                                                     marks=marks,
                                                     **kwargs)
        self.emb_dim = emb_dim
        self.encoding = encoding
        self.time_encoding = time_encoding
        self.embedding_constraint = embedding_constraint
        self.encoding_size = encoding_size(encoding=self.encoding,
                                           emb_dim=self.emb_dim)

        self.embedding = None
        if encoding in [
                "marks_only", "concatenate", "temporal_with_labels",
                "learnable_with_labels"
        ]:
            self.embedding = MLP(units=[self.emb_dim],
                                 activations=None,
                                 constraint=self.embedding_constraint,
                                 dropout_rates=0,
                                 input_shape=self.marks,
                                 activation_final=None,
                                 use_bias=False)

        self.temporal_enc = None
        if encoding in ["temporal", "temporal_with_labels"]:
            self.temporal_enc = SinusoidalEncoding(emb_dim=self.emb_dim,
                                                   scaling=temporal_scaling)
        elif encoding in ["learnable", "learnable_with_labels"]:
            self.temporal_enc = MLP(units=[self.emb_dim],
                                    activations=None,
                                    constraint=self.embedding_constraint,
                                    dropout_rates=0,
                                    input_shape=1,
                                    activation_final=None)
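In Example #3, the encoding argument determines which sub-modules are built: the mark and label options add a linear embedding MLP over the marks, "temporal" and "temporal_with_labels" add a SinusoidalEncoding, and the "learnable" options add a small MLP over the raw time instead. A minimal construction sketch, assuming the class can be instantiated directly; the concrete values are illustrative only.

# Hypothetical usage sketch; names come from Example #3, values are illustrative.
dec = VariableHistoryDecoder(
    name="variable-history",
    emb_dim=8,
    encoding="temporal_with_labels",  # builds both self.embedding and self.temporal_enc
    temporal_scaling=1.,
    marks=5,
)
# dec.encoding_size == encoding_size(encoding="temporal_with_labels", emb_dim=8)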
Example #4
 def __init__(
         self,
         # MLP
         units_mlp: List[int],
         activation_mlp: Optional[str] = "relu",
         dropout_mlp: Optional[float] = 0.,
         constraint_mlp: Optional[str] = None,
         activation_final_mlp: Optional[str] = "parametric_softplus",
         # Transformer
         units_rnn: Optional[int] = 16,
         layers_rnn: Optional[int] = 1,
         n_heads: Optional[int] = 1,
         activation_rnn: Optional[str] = "relu",
         dropout_rnn: Optional[float] = 0.,
         attn_activation: Optional[str] = "softmax",
         constraint_rnn: Optional[str] = None,
         # Other params
         mc_prop_est: Optional[float] = 1.,
         emb_dim: Optional[int] = 2,
         temporal_scaling: Optional[float] = 1.,
         encoding: Optional[str] = "times_only",
         time_encoding: Optional[str] = "relative",
         marks: Optional[int] = 1,
         **kwargs):
     super(SelfAttentionMCDecoder,
           self).__init__(name="selfattention-mc",
                          input_size=encoding_size(encoding=encoding,
                                                   emb_dim=emb_dim),
                          mc_prop_est=mc_prop_est,
                          emb_dim=emb_dim,
                          temporal_scaling=temporal_scaling,
                          encoding=encoding,
                          time_encoding=time_encoding,
                          marks=marks,
                          **kwargs)
     decoder_layer = TransformerDecoderLayer(
         d_model=self.encoding_size,
         nhead=n_heads,
         dim_feedforward=units_rnn,
         dropout=dropout_rnn,
         activation=activation_rnn,
         attn_activation=attn_activation,
         constraint=constraint_rnn,
         normalisation="layernorm")
     self.transformer_decoder = TransformerDecoderNetwork(
         decoder_layer=decoder_layer, num_layers=layers_rnn)
     self.mlp = MLP(units=units_mlp,
                    activations=activation_mlp,
                    constraint=constraint_mlp,
                    dropout_rates=dropout_mlp,
                    input_shape=self.encoding_size,
                    activation_final=activation_final_mlp)
     self.n_heads = n_heads
Example #5
 def __init__(
         self,
         # MLP
         units_mlp: List[int],
         activation_mlp: Optional[str] = "relu",
         dropout_mlp: Optional[float] = 0.,
         constraint_mlp: Optional[str] = None,
         activation_final_mlp: Optional[str] = None,
         # Transformer
         units_rnn: Optional[int] = 16,
         layers_rnn: Optional[int] = 1,
         n_heads: Optional[int] = 1,
         activation_rnn: Optional[str] = "relu",
         dropout_rnn: Optional[float] = 0.,
         attn_activation: Optional[str] = "softmax",
         # Other
         allow_window_attention: Optional[bool] = False,
         emb_dim: Optional[int] = 2,
         embedding_constraint: Optional[str] = None,
         temporal_scaling: Optional[float] = 1.,
         encoding: Optional[str] = "times_only",
         time_encoding: Optional[str] = "relative",
         marks: Optional[int] = 1,
         **kwargs):
     super(SelfAttentionEncoder,
           self).__init__(name="selfattention",
                          output_size=units_mlp[-1],
                          emb_dim=emb_dim,
                          embedding_constraint=embedding_constraint,
                          temporal_scaling=temporal_scaling,
                          encoding=encoding,
                          time_encoding=time_encoding,
                          marks=marks,
                          **kwargs)
     self.src_mask = None
     self.allow_window_attention = allow_window_attention
     encoder_layer = TransformerEncoderLayer(
         d_model=self.encoding_size,
         nhead=n_heads,
         dim_feedforward=units_rnn,
         dropout=dropout_rnn,
         activation=activation_rnn,
         attn_activation=attn_activation)
     self.transformer_encoder = TransformerEncoderNetwork(
         encoder_layer=encoder_layer, num_layers=layers_rnn)
     self.mlp = MLP(units=units_mlp,
                    activations=activation_mlp,
                    constraint=constraint_mlp,
                    dropout_rates=dropout_mlp,
                    input_shape=self.encoding_size,
                    activation_final=activation_final_mlp)
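A construction sketch for the encoder in Example #5. Class and parameter names come from the signature above; the concrete values and the fact that the class is directly importable are assumptions.

# Hypothetical usage; values are illustrative assumptions.
enc = SelfAttentionEncoder(
    units_mlp=[32, 16],       # last entry becomes the encoder's output_size
    units_rnn=32,
    layers_rnn=2,
    attn_activation="softmax",
    encoding="temporal_with_labels",
    emb_dim=8,
    marks=5,
)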
Example #6
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = "nonneg",
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            model_log_cm: Optional[bool] = False,
            do_zero_subtraction: Optional[bool] = True,
            emb_dim: Optional[int] = 2,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):

        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"

        enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        input_size = units_mlp[0] - enc_size
        super(MLPCmDecoder,
              self).__init__(name="mlp-cm",
                             do_zero_subtraction=do_zero_subtraction,
                             model_log_cm=model_log_cm,
                             input_size=input_size,
                             emb_dim=emb_dim,
                             encoding=encoding,
                             time_encoding=time_encoding,
                             marks=marks,
                             **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=units_mlp[0],
            activation_final=activation_final_mlp)
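Note that in Example #6 the first entry of units_mlp is the input width of the MLP rather than a hidden layer: the decoder's own input_size is recovered as units_mlp[0] minus the encoding size, and only units_mlp[1:] become layers. A sketch of the implied bookkeeping, with assumed values:

# Hypothetical values; encoding_size() is the same helper used in Example #6.
enc_size = encoding_size(encoding="times_only", emb_dim=2)
decoder = MLPCmDecoder(
    units_mlp=[enc_size + 4, 64, 1],  # first entry = enc_size + desired input_size
    encoding="times_only",
    emb_dim=2,
)
# decoder's input_size resolves to units_mlp[0] - enc_size == 4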
Example #7
 def __init__(
         self,
         # RNN args
         units_rnn: int,
         # MLP args
         units_mlp: List[int],
         activation_mlp: Optional[str] = "relu",
         dropout_mlp: Optional[float] = 0.,
         constraint_mlp: Optional[str] = None,
         activation_final_mlp: Optional[str] = "parametric_softplus",
         # Other params
         mc_prop_est: Optional[float] = 1.,
         input_size: Optional[int] = None,
         emb_dim: Optional[int] = 1,
         temporal_scaling: Optional[float] = 1.,
         encoding: Optional[str] = "times_only",
         time_encoding: Optional[str] = "relative",
         marks: Optional[int] = 1,
         **kwargs):
     super(NeuralHawkesDecoder,
           self).__init__(name="neural-hawkes",
                          mc_prop_est=mc_prop_est,
                          input_size=input_size,
                          emb_dim=emb_dim,
                          temporal_scaling=temporal_scaling,
                          encoding=encoding,
                          time_encoding=time_encoding,
                          marks=marks,
                          **kwargs)
     # Parameters
     self.weight_ih = nn.Parameter(th.Tensor(units_rnn, units_rnn * 7))
     self.weight_hh = nn.Parameter(th.Tensor(units_rnn, units_rnn * 7))
     self.bias = nn.Parameter(th.Tensor(units_rnn * 7))
     self.mlp = MLP(units=units_mlp,
                    activations=activation_mlp,
                    constraint=constraint_mlp,
                    dropout_rates=dropout_mlp,
                    input_shape=self.encoding_size,
                    activation_final=activation_final_mlp)
     self.units_rnn = units_rnn
     self.reset_parameters()
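The three parameter tensors in Example #7 have shape (units_rnn, 7 * units_rnn) or (7 * units_rnn,), presumably covering the seven gate pre-activations of the continuous-time LSTM behind the neural Hawkes process; reset_parameters() (not shown) is expected to initialise them. A construction sketch with assumed, illustrative values:

# Hypothetical usage; the values are illustrative assumptions.
decoder = NeuralHawkesDecoder(
    units_rnn=32,          # hidden size of the continuous-time recurrence
    units_mlp=[32, 1],     # MLP head; final activation defaults to parametric_softplus
    marks=3,
)
# decoder.weight_ih.shape == (32, 32 * 7)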
Example #8
 def __init__(self,
              units_mlp: List[int],
              activation_mlp: Optional[str] = "relu",
              dropout_mlp: Optional[float] = 0.,
              constraint_mlp: Optional[str] = None,
              activation_final_mlp: Optional[str] = None,
              history_size: Optional[int] = 2,
              marks: Optional[int] = 1,
              **kwargs):
     mlp = MLP(units=units_mlp,
               activations=activation_mlp,
               constraint=constraint_mlp,
               dropout_rates=dropout_mlp,
               input_shape=history_size,
               activation_final=activation_final_mlp)
     super(MLPFixedEncoder, self).__init__(name="mlp-fixed",
                                           net=mlp,
                                           output_size=units_mlp[-1],
                                           history_size=history_size,
                                           marks=marks,
                                           **kwargs)
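Unlike the variable-history encoders above, Example #8 feeds a fixed-size history window straight into the MLP, so history_size doubles as the MLP's input_shape. A usage sketch with assumed values:

# Hypothetical usage; values are illustrative assumptions.
encoder = MLPFixedEncoder(
    units_mlp=[64, 32],  # last entry becomes the encoder's output_size
    history_size=4,      # also used as the MLP's input_shape
    marks=3,
)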
Example #9
 def __init__(
         self,
         # MLP
         units_mlp: List[int],
         activation_mlp: Optional[str] = "relu",
         dropout_mlp: Optional[float] = 0.,
         constraint_mlp: Optional[str] = None,
         activation_final_mlp: Optional[str] = "parametric_softplus",
         # Other params
         mc_prop_est: Optional[float] = 1.,
         emb_dim: Optional[int] = 2,
         temporal_scaling: Optional[float] = 1.,
         encoding: Optional[str] = "times_only",
         time_encoding: Optional[str] = "relative",
         marks: Optional[int] = 1,
         **kwargs):
     if len(units_mlp) < 2:
         raise ValueError("Units of length at least 2 need to be specified")
     enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
     input_size = units_mlp[0] - enc_size
     super(MLPMCDecoder, self).__init__(
         name="mlp-mc",
         input_size=input_size,
         mc_prop_est=mc_prop_est,
         emb_dim=emb_dim,
         temporal_scaling=temporal_scaling,
         encoding=encoding,
         time_encoding=time_encoding,
         marks=marks,
         **kwargs)
     self.mlp = MLP(
         units=units_mlp[1:],
         activations=activation_mlp,
         constraint=constraint_mlp,
         dropout_rates=dropout_mlp,
         # units_mlp in this class also provides the input dimensionality
         # of the mlp
         input_shape=units_mlp[0],
         activation_final=activation_final_mlp)
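As in Example #6, units_mlp[0] here is the MLP's input width rather than a hidden layer, and at least two entries are required or a ValueError is raised. A sketch of a valid call, with assumed values:

# Hypothetical values; encoding_size() is the same helper used above.
enc_size = encoding_size(encoding="times_only", emb_dim=2)
decoder = MLPMCDecoder(
    units_mlp=[enc_size + 4, 64, 1],  # at least two entries; first is the input width
    encoding="times_only",
    emb_dim=2,
)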
Example #10
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            constraint_rnn: Optional[str] = None,
            # Other params
            do_zero_subtraction: Optional[bool] = True,
            model_log_cm: Optional[bool] = False,
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):

        if constraint_rnn is None:
            print("Warning! SA decoder is unconstrained. Setting to `nonneg`")
            constraint_rnn = "nonneg"
        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"

        super(SelfAttentionCmDecoder,
              self).__init__(name="selfattention-cm",
                             do_zero_subtraction=do_zero_subtraction,
                             model_log_cm=model_log_cm,
                             input_size=encoding_size(encoding=encoding,
                                                      emb_dim=emb_dim),
                             mc_prop_est=mc_prop_est,
                             emb_dim=emb_dim,
                             temporal_scaling=temporal_scaling,
                             encoding=encoding,
                             marks=marks,
                             **kwargs)
        decoder_layer = TransformerDecoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation,
            constraint=constraint_rnn,
            normalisation="layernorm_with_running_stats")
        self.transformer_decoder = TransformerDecoderNetwork(
            decoder_layer=decoder_layer, num_layers=layers_rnn)
        self.mlp = MLP(units=units_mlp,
                       activations=activation_mlp,
                       constraint=constraint_mlp,
                       dropout_rates=dropout_mlp,
                       input_shape=self.encoding_size,
                       activation_final=activation_final_mlp)
        self.n_heads = n_heads
Example #11
    prev_time = t - events
    prev_time[prev_time < 0] = x_max + 1.
    prev_time = th.argmin(prev_time)
    prev_time = events[prev_time]
    prev_times[i] = prev_time

tau = x_train - prev_times

activations, constraint, activation_final = "relu", None, None
if cumulative:
    (activations, constraint,
     activation_final) = "gumbel", "nonneg", "parametric_softplus"

mlp = MLP(
    units=units,
    input_shape=1,
    activations=activations,
    constraint=constraint,
    activation_final=activation_final)

optimiser = th.optim.Adam(params=mlp.parameters(), lr=1.e-3)

mse = th.nn.MSELoss()
tau_r = tau.reshape(-1, 1)
y_train_r = y_train.reshape(-1, 1)

if cumulative:
    tau_r.requires_grad = True

for i in range(epochs):
    optimiser.zero_grad()
    y_pred = mlp(tau_r)
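
The snippet above cuts off after the forward pass. For the non-cumulative case the usual continuation is a mean-squared-error step against y_train_r; the cumulative branch presumably differentiates the output with respect to tau_r first (which is why requires_grad is enabled), but that part is not shown. A sketch of the remaining loop body under the non-cumulative assumption:

    # Assumed continuation, not the original code (non-cumulative case).
    loss = mse(y_pred, y_train_r)
    loss.backward()
    optimiser.step()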