Code example #1
    def __init__(self, num_type: int, parameter_set: Dict = None):
        """
        Initialize exogenous intensity function: mu(t) = mu, mu in R^{C+1}, C is the number of event types
        :param num_type: for a point process with C types of events, num_type = C+1, in which the first type "0"
                         corresponds to an "empty" type never appearing in the sequence.
        :param parameter_set: a dictionary containing parameters
            parameter_set = {'activation': value = names of activation layers ('identity', 'relu', 'softplus')}
        """
        super(NaiveExogenousIntensity, self).__init__(num_type)
        # parameter_set defaults to None, so guard the lookup
        activation = parameter_set['activation'] if parameter_set is not None else None
        if activation is None:
            self.exogenous_intensity_type = 'constant'
            self.activation = 'identity'
        else:
            self.exogenous_intensity_type = '{}(constant)'.format(activation)
            self.activation = activation

        self.num_type = num_type
        self.dim_embedding = 1
        self.emb = nn.Embedding(self.num_type, self.dim_embedding)
        self.emb.weight = nn.Parameter(
            torch.FloatTensor(self.num_type, self.dim_embedding).uniform_(
                0.01 / self.dim_embedding, 1 / self.dim_embedding))
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'softplus':
            self.act = nn.Softplus(beta=self.num_type**0.5)
        elif self.activation == 'identity':
            self.act = Identity()
        else:
            logger.warning(
                'The activation layer "{}" cannot be identified...'.format(
                    self.activation))
            logger.warning('Identity activation is applied instead.')
            self.act = Identity()
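The constructor above stores the constant base rates mu as a 1-dimensional nn.Embedding, so a batch of event-type indices maps to its rates with a single lookup. Below is a minimal self-contained sketch of that trick outside PoPPy; the names mu_layer and event_types are illustrative, not from the project:

import torch
import torch.nn as nn

num_type = 4       # C + 1 event types; type 0 is the reserved "empty" type
dim_embedding = 1  # one scalar rate per type

# A 1-dim embedding acts as a learnable constant vector mu in R^{C+1}.
mu_layer = nn.Embedding(num_type, dim_embedding)
mu_layer.weight = nn.Parameter(
    torch.FloatTensor(num_type, dim_embedding).uniform_(
        0.01 / dim_embedding, 1 / dim_embedding))
act = nn.Softplus(beta=num_type ** 0.5)  # keeps the intensities positive

event_types = torch.LongTensor([1, 3, 2])  # a batch of event-type indices
mu = act(mu_layer(event_types))            # shape (3, 1): one rate per event
print(mu.squeeze(-1))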
Code example #2
File: HawkesProcess.py  Project: nicktianboli/PoPPy
    def __init__(self,
                 exogenous_intensity,
                 endogenous_intensity,
                 activation: str = None,
                 prob=1.0):
        super(HawkesProcessIntensity, self).__init__()
        self.exogenous_intensity = exogenous_intensity
        self.endogenous_intensity = endogenous_intensity
        if activation is None:
            self.intensity_type = "exogenous intensity + endogenous impacts"
            self.activation = 'identity'
        else:
            self.intensity_type = "{}(exogenous intensity + endogenous impacts)".format(
                activation)
            self.activation = activation

        # num_type is carried by the exogenous intensity (see code example #1);
        # without it, the softplus branch below would raise an AttributeError.
        self.num_type = self.exogenous_intensity.num_type
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'softplus':
            self.act = nn.Softplus(beta=self.num_type**0.5)
        elif self.activation == 'identity':
            self.act = Identity()
        else:
            logger.warning(
                'The activation layer "{}" cannot be identified...'.format(
                    self.activation))
            logger.warning('Identity activation is applied instead.')
            self.act = Identity()
        self.prob = prob
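Structurally, this class only composes its two sub-modules: lambda(t) = act(exogenous intensity + endogenous impacts). A stand-alone sketch of that composition, with plain tensors standing in for the sub-modules' outputs (the values are made up for illustration):

import torch
import torch.nn as nn

act = nn.Softplus(beta=4 ** 0.5)  # e.g. num_type = 4

# Stand-ins for the two sub-modules evaluated at some time t:
exogenous = torch.tensor([0.20, 0.50, 0.10])   # mu_c for three candidate events
endogenous = torch.tensor([0.05, 0.30, 0.00])  # sum_i phi_{c,c_i}(t - t_i)

# The Hawkes intensity is the activated sum of the two parts.
intensity = act(exogenous + endogenous)
print(intensity)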
Code example #3
    def __init__(self, num_type: int, kernel, parameter_set: Dict):
        """
        Initialize endogenous impact: phi_{kk'}(t) = sum_{m} a_{kk'm} kernel_m(t),
        for m = 1, ..., M, A_m = [a_{kk'm}] in R^{(C+1)*(C+1)}, C is the number of event types
        :param num_type: for a point process with C types of events, num_type = C+1, in which the first type "0"
                         corresponds to an "empty" type never appearing in the sequence.
        :param kernel: an instance of a decay kernel class in "DecayKernelFamily"
        :param parameter_set: a dictionary containing parameters
            parameter_set = {'activation': value = names of activation layers ('identity', 'relu', 'softplus'),
                             'dim_embedding': value = the dimension of the feature vector (embedding)}
        """
        super(FactorizedEndogenousImpact, self).__init__(num_type, kernel)
        activation = parameter_set['activation']
        dim_embedding = parameter_set['dim_embedding']
        if activation is None:
            self.endogenous_impact_type = "sum_m (u_{cm}^T * v_{c'm}) * kernel_m(t)"
            self.activation = 'identity'
        else:
            self.endogenous_impact_type = "sum_m {}(u_(cm)^T * v_(c'm)) * kernel_m(t))".format(activation)
            self.activation = activation

        self.decay_kernel = kernel
        self.num_base = self.decay_kernel.parameters.shape[1]
        self.num_type_u = num_type
        self.num_type_v = num_type
        self.dim_embedding = dim_embedding
        # One pair of embeddings (u_m, v_m) per basis kernel.
        self.basis_u = nn.ModuleList()
        self.basis_v = nn.ModuleList()
        for m in range(self.num_base):
            emb_u = nn.Embedding(self.num_type_u, self.dim_embedding)
            emb_v = nn.Embedding(self.num_type_v, self.dim_embedding)
            emb_u.weight = nn.Parameter(
                torch.FloatTensor(self.num_type_u, self.dim_embedding).uniform_(
                    0.01 / self.dim_embedding, 1 / self.dim_embedding))
            emb_v.weight = nn.Parameter(
                torch.FloatTensor(self.num_type_v, self.dim_embedding).uniform_(
                    0.01 / self.dim_embedding, 1 / self.dim_embedding))
            self.basis_u.append(emb_u)
            self.basis_v.append(emb_v)

        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'softplus':
            self.act = nn.Softplus(beta=self.num_type**0.5)
        elif self.activation == 'identity':
            self.act = Identity()
        else:
            logger.warning('The activation layer "{}" cannot be identified...'.format(self.activation))
            logger.warning('Identity activation is applied instead.')
            self.act = Identity()
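The factorization keeps, for each basis kernel m, two embedding tables U_m and V_m, so the (C+1) x (C+1) coefficient matrix A_m = [a_{kk'm}] with a_{kk'm} = u_{km}^T v_{k'm} never has to be stored explicitly. A small sketch of recovering A_m from its factors; num_type, dim_embedding, and the variable names here are illustrative, not from PoPPy:

import torch
import torch.nn as nn

num_type = 4
dim_embedding = 8

emb_u = nn.Embedding(num_type, dim_embedding)
emb_v = nn.Embedding(num_type, dim_embedding)

# A_m = U_m @ V_m^T: (C+1, D) x (D, C+1) -> the full (C+1, C+1) coefficients.
A_m = emb_u.weight @ emb_v.weight.t()

# A single coefficient a_{kk'm} is an inner product of two embedding rows.
k, k_prime = 1, 2
a = torch.dot(emb_u.weight[k], emb_v.weight[k_prime])
assert torch.allclose(a, A_m[k, k_prime])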