Example #1
    def __init__(self,
                 num_fields: int,
                 layer_sizes: List[int],
                 dropout_p: Optional[List[float]] = None,
                 activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize ElaboratedEntireSpaceSupervisedMultiTaskModel
        
        Args:
            num_fields (int): number of inputs' fields
            layer_sizes (List[int]): layer sizes of dense network
            dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.impress_to_click_pooling = nn.AdaptiveAvgPool1d(1)
        self.click_to_d_action_pooling = nn.AdaptiveAvgPool1d(1)
        self.d_action_to_buy_pooling = nn.AdaptiveAvgPool1d(1)
        self.o_action_to_buy_pooling = nn.AdaptiveAvgPool1d(1)

        self.impress_to_click_deep = DNNLayer(num_fields, 1, layer_sizes,
                                              dropout_p, activation)
        self.click_to_d_action_deep = DNNLayer(num_fields, 1, layer_sizes,
                                               dropout_p, activation)
        self.d_action_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes,
                                             dropout_p, activation)
        self.o_action_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes,
                                             dropout_p, activation)
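
A minimal instantiation sketch for the constructor above; the hyperparameter values are illustrative assumptions, not taken from the source:

essm = ElaboratedEntireSpaceSupervisedMultiTaskModel(
    num_fields=10,            # assumed number of input fields
    layer_sizes=[64, 32],     # two dense layers
    dropout_p=[0.1, 0.1],     # one dropout probability per layer
    activation=nn.ReLU()
)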
Example #2
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""initialize Deep Factorization Machine Model
        
        Args:
            embed_size (int): embedding size
            num_fields (int): number of fields in inputs
            deep_layer_sizes (List[int]): layer sizes of deep neural network
            fm_dropout_p (float, optional): dropout probability after factorization machine. Defaults to 0.0.
            deep_dropout_p (List[float], optional): dropout probability after activation of each layer. Allow: [None, list of float for each layer]. Defaults to None.
            deep_activation (Callable[[T], T], optional): activation function of each layer. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
        """
        # initialize nn.Module class
        super(DeepFactorizationMachineModel, self).__init__()

        # layers (deep and fm) of second-order part of inputs
        self.fm = FMLayer(fm_dropout_p)
        self.deep = DNNLayer(output_size=1,
                             layer_sizes=deep_layer_sizes,
                             embed_size=embed_size,
                             num_fields=num_fields,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
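
A hedged instantiation sketch for DeepFactorizationMachineModel; the sizes below are illustrative, not from the source:

deepfm = DeepFactorizationMachineModel(
    embed_size=8,              # assumed embedding size
    num_fields=10,             # assumed number of fields
    deep_layer_sizes=[64, 32],
    fm_dropout_p=0.1,
    deep_dropout_p=[0.1, 0.1],
    deep_activation=nn.ReLU()
)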
Example #3
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""initialize Factorization-machine Supported Neural Network
        
        Args:
            embed_size (int): embedding size
            num_fields (int): number of fields in inputs
            deep_output_size (int): output size of deep neural network
            deep_layer_sizes (List[int]): layer sizes of deep neural network
            fm_dropout_p (float, optional): dropout probability after factorization machine. Defaults to 0.0.
            deep_dropout_p (List[float], optional): dropout probability after activation of each layer. Allow: [None, list of float for each layer]. Defaults to None.
            deep_activation (Callable[[T], T], optional): activation function of each layer. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
        """
        super(FactorizationMachineSupportedNeuralNetworkModel, self).__init__()

        # initialize factorization machine layer
        self.fm = FMLayer(fm_dropout_p)

        # initialize dense layers
        cat_size = num_fields + embed_size
        self.deep = DNNLayer(output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             inputs_size=cat_size,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
Example #4
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 reduction: int,
                 ffm_dropout_p: Optional[float] = 0.0,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize FieldAttentiveDeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            reduction (int): reduction ratio of the CEN layer
            ffm_dropout_p (float, optional): probability of Dropout in FFM. Defaults to 0.0
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.cen = CENLayer(num_fields, reduction)
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
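
Several examples size the dense network's input with combination(num_fields, 2), i.e. the number of unordered field pairs NC2. A minimal sketch of such a helper, assuming it simply wraps the binomial coefficient (the library's actual utility may differ):

import math

def combination(n: int, k: int) -> int:
    # Number of ways to choose k of n fields, e.g. combination(10, 2) == 45;
    # with embed_size = 8 the flattened pairwise output is 45 * 8 = 360 values.
    return math.comb(n, k)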
Example #5
    def __init__(self,
                 embed_size       : int,
                 deep_output_size : int,
                 deep_layer_sizes : List[int],
                 deep_dropout_p   : List[float] = None,
                 deep_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        r"""Initialize NeuralCollaborativeFilteringModel
        
        Args:
            embed_size (int): Size of embedding tensor
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            deep (nn.Module): Module of dense layer.
            glm (nn.Module): Module of matrix factorization layer.
        """
        # refer to parent class
        super(NeuralCollaborativeFilteringModel, self).__init__()

        # initialize dense layer
        self.deep = DNNLayer(
            inputs_size = embed_size * 2,
            output_size = deep_output_size, 
            layer_sizes = deep_layer_sizes, 
            dropout_p   = deep_dropout_p, 
            activation  = deep_activation
        )

        # initialize gmf layer
        self.glm = GMFLayer()
Example #6
    def __init__(self,
                 embed_size       : int,
                 num_fields       : int,
                 deep_output_size : int,
                 deep_layer_sizes : List[int],
                 reduction        : int, 
                 ffm_dropout_p    : float = 0.0,
                 deep_dropout_p   : List[float] = None,
                 deep_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel, self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)
        
        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        
        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.deep = DNNLayer(
            output_size = deep_output_size,
            layer_sizes = deep_layer_sizes,
            embed_size  = embed_size,
            num_fields  = inputs_size,
            dropout_p   = deep_dropout_p,
            activation  = deep_activation
        )
Example #7
    def __init__(self,
                 embed_size: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 deep_dropout_p: List[float] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize NeuralCollaborativeFilteringModel
        
        Args:
            embed_size (int): size of embedding tensor
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.deep = DNNLayer(inputs_size=embed_size * 2,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        self.glm = GMFLayer()
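
A hedged instantiation sketch for NeuralCollaborativeFilteringModel; inputs_size is fixed internally to embed_size * 2 (user and item embeddings concatenated), so only the illustrative sizes below need choosing:

ncf = NeuralCollaborativeFilteringModel(
    embed_size=16,             # dense input becomes 16 * 2 = 32 after concatenation
    deep_output_size=8,
    deep_layer_sizes=[32, 16],
    deep_dropout_p=[0.1, 0.1],
    deep_activation=nn.ReLU()
)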
Example #8
 def __init__(self, 
              embed_size       : int,
              deep_output_size : int,
              deep_layer_sizes : List[int],
              deep_dropout_p   : List[float] = None,
              deep_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
     r"""inititalize neural collaborative filtering
     
     Args:
         embed_size (int): embedding size
         deep_output_size (int): output size of deep neural network
          deep_layer_sizes (List[int]): layer sizes of deep neural network
          deep_dropout_p (List[float], optional): dropout probability in deep neural network. Allow: [None, list of float for each layer]. Defaults to None.
         deep_activation (Callable[[T], T], optional): activation function of each layer. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
     """
     super(NeuralCollaborativeFilteringModel, self).__init__()
     self.mlp = DNNLayer(
         output_size = deep_output_size, 
         layer_sizes = deep_layer_sizes, 
         embed_size  = embed_size,
         num_fields  = 2,
         dropout_p   = deep_dropout_p, 
         activation  = deep_activation
     )
     self.glm = GMFLayer()
Example #9
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: Optional[float] = None,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize DeepFactorizationMachineModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_layer_sizes (List[int]): layer sizes of dense network
            fm_dropout_p (float, optional): probability of Dropout in FM. Defaults to None
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.fm = FMLayer(fm_dropout_p)
        self.deep = DNNLayer(
            inputs_size=num_fields * embed_size,
            output_size=1,
            layer_sizes=deep_layer_sizes,
            dropout_p=deep_dropout_p,
            activation=deep_activation
        )
Example #10
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 output_size: int,
                 prod_method: str,
                 deep_layer_sizes: List[int],
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 **kwargs):
        r"""Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            output_size (int): Output size of model
            prod_method (str): Method of product neural network. 
                Allow: [inner, outer].
            deep_layer_sizes (List[int]): Layer sizes of DNN
            deep_dropout_p (List[float], optional): Probability of Dropout in DNN. 
                Allow: [None, list of float for each layer]. 
                Defaults to None.
            deep_activation (Callable[[torch.Tensor], torch.Tensor], optional): Activation function of Linear. 
                Allow: [None, Callable[[T], T]]. 
                Defaults to nn.ReLU().
        
        Raises:
            ValueError: when prod_method is not in [inner, outer].
        """
        # refer to parent class
        super(ProductNeuralNetworkModel, self).__init__()

        # initialize product network
        if prod_method == "inner":
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == "outer":
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    "kernel_type", "mat"))
        else:
            raise ValueError(
                f"{prod_method} is not allowed in prod_method. Please use ['inner', 'outer']."
            )

        # calculate size of inputs of DNNLayer
        cat_size = 1 + num_fields + combination(num_fields, 2)

        # initialize dnn layer
        self.dnn = DNNLayer(output_size=output_size,
                            layer_sizes=deep_layer_sizes,
                            inputs_size=cat_size,
                            dropout_p=deep_dropout_p,
                            activation=deep_activation)

        # initialize bias variable
        self.bias = nn.Parameter(torch.zeros(1))
        nn.init.uniform_(self.bias.data)
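
A sketch of both product modes of ProductNeuralNetworkModel; kernel_type is only consumed by the outer-product branch via **kwargs, and all values are illustrative:

ipnn = ProductNeuralNetworkModel(
    embed_size=8, num_fields=10, output_size=1,
    prod_method="inner", deep_layer_sizes=[64, 32]
)

opnn = ProductNeuralNetworkModel(
    embed_size=8, num_fields=10, output_size=1,
    prod_method="outer", deep_layer_sizes=[64, 32],
    kernel_type="mat"          # forwarded through **kwargs
)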
Example #11
    def __init__(self,
                 pctr_model: nn.Module,
                 pos_model: nn.Module = None,
                 output_size: int = None,
                 max_num_position: int = None,
                 **kwargs):
        r"""Initialize PositionBiasAwareLearningFrameworkModel
        
        Args:
            pctr_model (nn.Module): model of CTR prediction
            pos_model (nn.Module, optional): Model of position-bias.
                Defaults to None.
            output_size (int, optional): Size of output tensor of click-through-rate model.
                Defaults to None.
            max_num_position (int, optional): Maximum length of list, i.e. maximum number of positions.
                Defaults to None.
        
        Arguments:
            layer_sizes (List[int]): Layer sizes of DNNLayer.
            dropout_p (List[float]): Probability of Dropout in DNNLayer.
            activation (Callable[[T], T]): Activation function in DNNLayer.
        
        Attributes:
            pctr_model (nn.Module): model of CTR prediction.
            pos_model (nn.Module): Model of position-bias.

        """
        # refer to parent class
        super(PositionBiasAwareLearningFrameworkModel, self).__init__()

        # Initialize pCTR module
        self.pctr_model = pctr_model

        # Initialize position module
        if pos_model is not None:
            # Bind a nn.Module to pos_model
            self.pos_model = pos_model
        else:
            # Initialize sequential to store the module of positional embedding
            self.pos_model = nn.Sequential()

            # Initialize positional embedding layer
            self.pos_model.add_module(
                "PosEmbedding",
                PALLayer(input_size=output_size,
                         max_num_position=max_num_position))

            # Initialize dense layer after apply positional embedding bias
            self.pos_model.add_module(
                "Dense",
                DNNLayer(inputs_size=output_size,
                         output_size=1,
                         layer_sizes=kwargs.get("layer_sizes"),
                         dropout_p=kwargs.get("dropout_p"),
                         activation=kwargs.get("activation")))

            # Initialize sigmoid layer to transform outputs
            self.pos_model.add_module("Sigmoid", nn.Sigmoid())
Example #12
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 senet_reduction: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 bilinear_type: str = "all",
                 bilinear_bias: bool = True,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            senet_reduction (int): Size of reduction in dense layer of senet.
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            bilinear_type (str, optional): Type of bilinear to calculate interactions.
                Defaults to "all".
            bilinear_bias (bool, optional): Flag to control using bias in bilinear-interactions.
                Defaults to True.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network.
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network.
                Defaults to nn.ReLU().
        
        Attributes:
            senet (nn.Module): Module of Squeeze-and-Excitation Network layer.
            bilinear (nn.Module): Module of Bilinear-interaction layer.
            deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(FeatureImportanceAndBilinearFeatureInteractionNetwork,
              self).__init__()

        # Initialize senet layer
        self.senet = SENETLayer(num_fields, senet_reduction)

        # Initialize bilinear interaction layer
        self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                     bilinear_type,
                                                     bilinear_bias)
        self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                       bilinear_type,
                                                       bilinear_bias)

        # Calculate inputs' size of DNNLayer, i.e. number of bilinear interactions (= NC2) * embed_size * 2
        inputs_size = combination(num_fields, 2)
        inputs_size = inputs_size * embed_size * 2

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
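
A hedged instantiation sketch for FeatureImportanceAndBilinearFeatureInteractionNetwork (all values illustrative):

fibinet = FeatureImportanceAndBilinearFeatureInteractionNetwork(
    embed_size=8,
    num_fields=10,
    senet_reduction=4,         # assumed SENET reduction ratio
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    bilinear_type="all",
    deep_dropout_p=[0.1, 0.1]
)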
Example #13
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 prod_method: str = 'inner',
                 use_bias: Optional[bool] = True,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU(),
                 **kwargs):
        """
        Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_layer_sizes (List[int]): layer sizes of dense network
            output_size (int): output size of model. i.e. output size of dense network. Defaults to 1
            prod_method (str): method of product neural network. Allow: ["inner", "outer"]. Defaults to inner
            use_bias (bool, optional): whether the bias constant is concatenated to the input. Defaults to True
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        
        Arguments:
            kernel_type (str): type of kernel to compress outer-product.
        """
        super().__init__()

        if prod_method == 'inner':
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == 'outer':
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    'kernel_type', 'mat'))
        else:
            raise ValueError(
                f'{prod_method} is not allowed in prod_method. Required: ["inner", "outer"].'
            )

        self.use_bias = use_bias

        cat_size = combination(num_fields, 2) + num_fields
        if self.use_bias:
            cat_size += 1
        self.deep = DNNLayer(output_size=output_size,
                             layer_sizes=deep_layer_sizes,
                             inputs_size=cat_size,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1, 1), names=(
                'B',
                'O',
            )))
            nn.init.uniform_(self.bias.data)
Example #14
    def __init__(self,
                 num_fields: int,
                 layer_sizes: List[int],
                 dropout_p: List[float] = None,
                 activation: Callable[[torch.Tensor],
                                      torch.Tensor] = nn.ReLU()):
        r"""Initialize ElaboratedEntireSpaceSupervisedMultiTaskModel
        
        Args:
            num_fields (int): Number of inputs' fields
            layer_sizes (List[int]): Layer sizes of dense network
            dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            impress_to_click_pooling (nn.Module): Module of 1D average pooling layer for impress_to_click
            click_to_daction_pooling (nn.Module): Module of 1D average pooling layer for click_to_daction
            daction_to_buy_pooling (nn.Module): Module of 1D average pooling layer for daction_to_buy
            oaction_to_buy_pooling (nn.Module): Module of 1D average pooling layer for oaction_to_buy
            impress_to_click_deep (nn.Module): Module of dense layer.
            click_to_daction_deep (nn.Module): Module of dense layer.
            daction_to_buy_deep (nn.Module): Module of dense layer.
            oaction_to_buy_deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(ElaboratedEntireSpaceSupervisedMultiTaskModel, self).__init__()

        # Initialize pooling layers
        self.impress_to_click_pooling = nn.AdaptiveAvgPool1d(1)
        self.click_to_daction_pooling = nn.AdaptiveAvgPool1d(1)
        self.daction_to_buy_pooling = nn.AdaptiveAvgPool1d(1)
        self.oaction_to_buy_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layers
        self.impress_to_click_deep = DNNLayer(num_fields, 1, layer_sizes,
                                              dropout_p, activation)
        self.click_to_daction_deep = DNNLayer(num_fields, 1, layer_sizes,
                                              dropout_p, activation)
        self.daction_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes,
                                            dropout_p, activation)
        self.oaction_to_buy_deep = DNNLayer(num_fields, 1, layer_sizes,
                                            dropout_p, activation)
Example #15
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 cin_layer_sizes: List[int],
                 deep_layer_sizes: List[int],
                 cin_is_direct: Optional[bool] = False,
                 cin_use_bias: Optional[bool] = True,
                 cin_use_batchnorm: Optional[bool] = True,
                 cin_activation: Optional[nn.Module] = nn.ReLU(),
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize XDeepFactorizationMachineModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            cin_layer_sizes (List[int]): layer sizes of compress interaction network
            deep_layer_sizes (List[int]): layer sizes of DNN
            cin_is_direct (bool, optional): whether outputs are passed to next step directly or not in compress
                interaction network. Defaults to False
            cin_use_bias (bool, optional): whether bias is added to Conv1d or not in compress interaction network.
                Defaults to True
            cin_use_batchnorm (bool, optional): whether batch normalization is applied or not after Conv1d in
                compress interaction network. Defaults to True
            cin_activation (nn.Module, optional): activation function of Conv1d in compress interaction network.
                Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU()
            deep_dropout_p (List[float], optional): probability of Dropout in DNN.
                Allow: [None, list of float for each layer]. Defaults to None
            deep_activation (nn.Module, optional): activation function of Linear. Allow: [None, Callable[[T], T]].
                Defaults to nn.ReLU()
        """
        super().__init__()

        self.cin = CINLayer(
            embed_size=embed_size,
            num_fields=num_fields,
            output_size=1,
            layer_sizes=cin_layer_sizes,
            is_direct=cin_is_direct,
            use_bias=cin_use_bias,
            use_batchnorm=cin_use_batchnorm,
            activation=cin_activation
        )

        self.deep = DNNLayer(
            inputs_size=embed_size * num_fields,
            output_size=1,
            layer_sizes=deep_layer_sizes,
            dropout_p=deep_dropout_p,
            activation=deep_activation
        )

        self.bias = nn.Parameter(torch.zeros(1))
        nn.init.uniform_(self.bias.data)
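
A hedged instantiation sketch for XDeepFactorizationMachineModel (all values illustrative):

xdeepfm = XDeepFactorizationMachineModel(
    embed_size=8,
    num_fields=10,
    cin_layer_sizes=[16, 16],  # assumed CIN layer sizes
    deep_layer_sizes=[64, 32],
    deep_dropout_p=[0.1, 0.1]
)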
Example #16
    def __init__(self,
                 num_fields: int,
                 layer_sizes: List[int],
                 dropout_p: Optional[List[float]] = None,
                 activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize EntireSpaceMultiTaskModel
        
        Args:
            num_fields (int): number of inputs' fields
            layer_sizes (List[int]): layer sizes of dense network
            dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None.
            activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU().
        """
        super().__init__()

        self.cvr_pooling = nn.AdaptiveAvgPool1d(1)
        self.ctr_pooling = nn.AdaptiveAvgPool1d(1)
        self.cvr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p,
                                 activation)
        self.ctr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p,
                                 activation)
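
A hedged instantiation sketch for EntireSpaceMultiTaskModel (all values illustrative):

esmm = EntireSpaceMultiTaskModel(
    num_fields=10,
    layer_sizes=[64, 32],
    dropout_p=[0.1, 0.1],
    activation=nn.ReLU()
)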
Example #17
    def __init__(self,
                 num_fields: int,
                 layer_sizes: List[int],
                 dropout_p: List[float] = None,
                 activation: Callable[[torch.Tensor],
                                      torch.Tensor] = nn.ReLU()):
        r"""Initialize EntireSpaceMultiTaskModel
        
        Args:
            num_fields (int): Number of inputs' fields
            layer_sizes (List[int]): Layer sizes of dense network
            dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cvr_pooling (nn.Module): Module of 1D average pooling layer for CVR prediction.
            cvr_deep (nn.Module): Module of dense layer.
            ctr_pooling (nn.Module): Module of 1D average pooling layer for CTR prediction.
            ctr_deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(EntireSpaceMultiTaskModel, self).__init__()

        # Initialize pooling layer of CVR
        self.cvr_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layer of CVR
        self.cvr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p,
                                 activation)

        # Initialize pooling layer of CTR
        self.ctr_pooling = nn.AdaptiveAvgPool1d(1)

        # Initialize dense layer of CTR
        self.ctr_deep = DNNLayer(num_fields, 1, layer_sizes, dropout_p,
                                 activation)
Example #18
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 reduction: int,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        """Initialize FieldAttentiveDeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            reduction (int): Reduction ratio of the CEN layer
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cen (nn.Module): Module of compose excitation network layer.
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # refer to parent class
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel,
              self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
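
A hedged instantiation sketch for the model above; with these illustrative values the dense input size is combination(10, 2) * 8 = 360:

fat_deepffm = FieldAttentiveDeepFieldAwareFactorizationMachineModel(
    embed_size=8,
    num_fields=10,
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    reduction=2,               # assumed CEN reduction ratio
    ffm_dropout_p=0.1,
    deep_dropout_p=[0.1, 0.1]
)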
Example #19
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 num_tasks: int,
                 num_experts: int,
                 expert_output_size: int,
                 expert_layer_sizes: List[int],
                 deep_layer_sizes: List[int],
                 expert_dropout_p: Optional[List[float]] = None,
                 deep_dropout_p: Optional[List[float]] = None,
                 expert_activation: Optional[nn.Module] = nn.ReLU(),
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize MultiGateMixtureOfExpertsModel

        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            num_tasks (int): number of tasks
            num_experts (int): number of experts
            expert_output_size (int): output size of expert layer
            expert_layer_sizes (List[int]): layer sizes of expert layer
            deep_layer_sizes (List[int]): layer sizes of dense network
            expert_dropout_p (List[float], optional): probability of Dropout in expert layer. Defaults to None
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            expert_activation (torch.nn.Module, optional): activation function of expert layer. Defaults to nn.ReLU()
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.num_tasks = num_tasks
        self.moe_layer = MOELayer(inputs_size=embed_size * num_fields,
                                  output_size=num_experts * expert_output_size,
                                  num_gates=num_tasks,
                                  num_experts=num_experts,
                                  expert_func=DNNLayer,
                                  expert_inputs_size=embed_size * num_fields,
                                  expert_output_size=expert_output_size,
                                  expert_layer_sizes=expert_layer_sizes,
                                  expert_dropout_p=expert_dropout_p,
                                  expert_activation=expert_activation)
        self.towers = nn.ModuleDict()
        for i in range(num_tasks):
            tower = DNNLayer(inputs_size=expert_output_size * num_experts,
                             output_size=1,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
            self.towers[f'Tower_{i}'] = tower
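
A hedged instantiation sketch for MultiGateMixtureOfExpertsModel; each of the num_tasks towers consumes the concatenated expert outputs of size num_experts * expert_output_size (all values illustrative):

mmoe = MultiGateMixtureOfExpertsModel(
    embed_size=8,
    num_fields=10,
    num_tasks=2,
    num_experts=4,
    expert_output_size=16,     # each tower sees 4 * 16 = 64 inputs
    expert_layer_sizes=[32],
    deep_layer_sizes=[32, 16]
)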
Example #20
    def __init__(self,
                 inputs_size: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 cross_num_layers: int,
                 output_size: int = 1,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize DeepAndCrossNetworkModel
        
        Args:
            inputs_size (int): Inputs size of dense network and cross network, 
                i.e. number of fields * embedding size.
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            cross_num_layers (int): Number of layers of Cross Network
            output_size (int, optional): Output size of model, 
                i.e. output size of the projection layer. 
                Defaults to 1.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network.
                Defaults to nn.ReLU().
        
        Attributes:
            deep (nn.Module): Module of dense layer.
            cross (nn.Module): Module of cross network layer.
            fc (nn.Module): Module of projection layer, i.e. linear layer of output.
        """
        # Refer to parent class
        super(DeepAndCrossNetworkModel, self).__init__()

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        # Initialize cross layer
        self.cross = CrossNetworkLayer(inputs_size=inputs_size,
                                       num_layers=cross_num_layers)

        # Initialize linear layer
        cat_size = deep_output_size + inputs_size
        self.fc = nn.Linear(cat_size, output_size)
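
A hedged instantiation sketch for DeepAndCrossNetworkModel; the projection layer consumes deep_output_size + inputs_size concatenated features (all values illustrative):

dcn = DeepAndCrossNetworkModel(
    inputs_size=8 * 10,        # embedding size * number of fields
    deep_output_size=16,
    deep_layer_sizes=[64, 32],
    cross_num_layers=3,
    output_size=1              # fc maps 16 + 80 = 96 features to 1
)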
Example #21
    def __init__(self,
                 embed_size: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize NeuralFactorizationMachineModel.
        
        Args:
            embed_size (int): Size of embedding tensor
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            fm_dropout_p (float, optional): Probability of Dropout in FM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            sequential (nn.Sequential): Module of sequential modules, including factorization
                machine layer and dense layer.
            bias (nn.Parameter): Parameter of bias of output projection.
        """
        # refer to parent class
        super(NeuralFactorizationMachineModel, self).__init__()

        # initialize sequential module
        self.sequential = nn.Sequential()

        # initialize fm layer
        self.sequential.add_module("B_interaction", FMLayer(fm_dropout_p))

        # initialize dense layer
        self.sequential.add_module(
            "Deep",
            DNNLayer(output_size=deep_output_size,
                     layer_sizes=deep_layer_sizes,
                     inputs_size=embed_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))

        # initialize bias parameter
        self.bias = nn.Parameter(torch.zeros((1, 1), names=("B", "O")))
        nn.init.uniform_(self.bias.data)
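
A hedged instantiation sketch for NeuralFactorizationMachineModel; the FM layer collapses the field dimension, so the dense network's input size equals embed_size (all values illustrative):

nfm = NeuralFactorizationMachineModel(
    embed_size=8,
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    fm_dropout_p=0.1,
    deep_dropout_p=[0.1, 0.1]
)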
Example #22
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize DeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            output_size (int, optional): Output size of model, 
                i.e. output size of the projection layer. 
                Defaults to 1.
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # Initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # Calculate inputs' size of DNNLayer, i.e. output's size of ffm (= NC2) * embed_size
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
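
A hedged instantiation sketch for DeepFieldAwareFactorizationMachineModel (all values illustrative):

deepffm = DeepFieldAwareFactorizationMachineModel(
    embed_size=8,
    num_fields=10,
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    ffm_dropout_p=0.1
)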
Example #23
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 output_size: int = 1):
        r"""initialize Deep Field-aware Factorization Machine Model
        
        Args:
            embed_size (int): embedding size
            num_fields (int): number of fields in inputs
            deep_output_size (int): output size of deep neural network
            deep_layer_sizes (List[int]): layer sizes of deep neural network
            ffm_dropout_p (float, optional): dropout probability after ffm layer. Defaults to 0.0.
            deep_dropout_p (List[float], optional): dropout probability after each deep neural network. Allow: [None, List[float]]. Defaults to None.
            deep_activation (Callable[[T], T], optional): activation after each deep neural network. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
            output_size (int, optional): output size of linear transformation after concatenate. Defaults to 1.
        """
        # initialize nn.Module class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # sequential of second-order part in inputs
        self.second_order = nn.Sequential()
        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.second_order.add_module(
            "ffm", FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p))

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)

        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.second_order.add_module(
            "deep",
            DNNLayer(output_size=deep_output_size,
                     layer_sizes=deep_layer_sizes,
                     embed_size=embed_size,
                     num_fields=inputs_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))
Example #24
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize NeuralFactorizationMachineLayer.
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of DNN
            deep_layer_sizes (List[int]): Layer sizes of DNN
            fm_dropout_p (float, optional): Probability of Dropout in FM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in DNN. 
                Allow: [None, list of float for each layer]. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of Linear. 
                Allow: [None, Callable[[T], T]]. 
                Defaults to nn.ReLU().
        """
        # refer to parent class
        super(NeuralFactorizationMachineLayer, self).__init__()

        # initialize sequential of model
        self.sequential = nn.Sequential()

        # add modules to model
        self.sequential.add_module("b_interaction", FMLayer(fm_dropout_p))
        self.sequential.add_module(
            "hidden",
            DNNLayer(output_size=deep_output_size,
                     layer_sizes=deep_layer_sizes,
                     inputs_size=embed_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))

        # initialize bias variable
        self.bias = nn.Parameter(torch.zeros(1))
        nn.init.uniform_(self.bias.data)
Example #25
    def __init__(self,
                 embed_size         : int,
                 num_fields         : int,
                 num_tasks          : int,
                 num_experts        : int,
                 expert_output_size : int,
                 expert_layer_sizes : List[int],
                 deep_layer_sizes   : List[int],
                 expert_dropout_p   : List[float],
                 deep_dropout_p     : List[float],
                 expert_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU(),
                 deep_activation    : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        # refer to parent class
        super(MultigateMixtureOfExpertsModel, self).__init__()

        # Bind the number of tasks to the module
        self.num_tasks = num_tasks

        # Initialize a multi-gate mixture of experts layer
        self.mmoe = MOELayer(
            inputs_size = embed_size * num_fields,
            output_size = num_experts * expert_output_size,
            num_gates   = num_tasks,
            num_experts = num_experts,
            expert_func = DNNLayer,
            expert_inputs_size = embed_size * num_fields,
            expert_output_size = expert_output_size,
            expert_layer_sizes = expert_layer_sizes,
            expert_dropout_p   = expert_dropout_p,
            expert_activation  = expert_activation
        )

        # Initialize a dictionary of tasks' models
        self.towers = nn.ModuleDict()
        for i in range(num_tasks):
            tower = DNNLayer(
                inputs_size = expert_output_size * num_experts,
                output_size = 1,
                layer_sizes = deep_layer_sizes,
                dropout_p   = deep_dropout_p,
                activation  = deep_activation
            )
            self.towers[("Tower_%d" % i)] = tower
Example #26
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 senet_reduction: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 bilinear_type: Optional[str] = 'all',
                 bilinear_bias: Optional[bool] = True,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            senet_reduction (int): size of reduction in dense layer of senet.
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to "all"
            bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        inputs_size = combination(num_fields, 2)
        inputs_size = inputs_size * embed_size * 2

        self.senet = SENETLayer(num_fields, senet_reduction, squared=False)
        self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                     bilinear_type,
                                                     bilinear_bias)
        self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                       bilinear_type,
                                                       bilinear_bias)
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
Example #27
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 fm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        r"""Initialize FactorizationMachineSupportedNeuralNetworkModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            fm_dropout_p (float, optional): Probability of Dropout in FM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            fm (nn.Module): Module of factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # refer to parent class
        super(FactorizationMachineSupportedNeuralNetworkModel, self).__init__()

        # initialize fm layer
        self.fm = FMLayer(fm_dropout_p)

        # initialize dense layers
        cat_size = num_fields + embed_size
        self.deep = DNNLayer(
            inputs_size=cat_size,
            output_size=deep_output_size,
            layer_sizes=deep_layer_sizes,
            dropout_p=deep_dropout_p,
            activation=deep_activation
        )
Example #28
    def __init__(self,
                 pctr_model: nn.Module,
                 pos_model: Optional[nn.Module] = None,
                 output_size: Optional[int] = None,
                 max_num_position: Optional[int] = None,
                 **kwargs):
        """
        Initialize PositionBiasAwareLearningFrameworkModel
        
        Args:
            pctr_model (nn.Module): model of CTR prediction
            pos_model (nn.Module, optional): model of position-bias. Defaults to None
            output_size (int, optional): size of output tensor of click-through-rate model. Defaults to None
            max_num_position (int, optional): maximum length of list, i.e. maximum number of positions. Defaults to None
        
        Arguments:
            layer_sizes (List[int]): Layer sizes of DNNLayer.
            dropout_p (List[float]): Probability of Dropout in DNNLayer.
            activation (nn.Module): Activation function in DNNLayer.
        """
        super().__init__()

        self.pctr_model = pctr_model

        if pos_model is not None:
            self.pos_model = pos_model
        else:
            self.pos_model = nn.Sequential()
            self.pos_model.add_module(
                'PosEmbedding',
                PALLayer(input_size=output_size,
                         max_num_position=max_num_position))
            self.pos_model.add_module(
                'Dense',
                DNNLayer(inputs_size=output_size,
                         output_size=1,
                         layer_sizes=kwargs.get('layer_sizes'),
                         dropout_p=kwargs.get('dropout_p'),
                         activation=kwargs.get('activation')))
            self.pos_model.add_module('Sigmoid', nn.Sigmoid())
Example #29
    def __init__(self,
                 embed_size: int,
                 deep_layer_sizes: List[int],
                 use_bias: Optional[bool] = True,
                 fm_dropout_p: Optional[float] = None,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize NeuralFactorizationMachineModel.
        
        Args:
            embed_size (int): size of embedding tensor
            deep_layer_sizes (List[int]): layer sizes of dense network
            use_bias (bool, optional): whether the bias constant is concatenated to the input. Defaults to True
            fm_dropout_p (float, optional): probability of Dropout in FM. Defaults to None
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (Callable[[T], T], optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.sequential = nn.Sequential()
        self.sequential.add_module('B_interaction', FMLayer(fm_dropout_p))
        self.sequential.add_module(
            'Deep',
            DNNLayer(output_size=1,
                     layer_sizes=deep_layer_sizes,
                     inputs_size=embed_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))

        self.use_bias = use_bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1, 1), names=(
                'B',
                'O',
            )))
            nn.init.uniform_(self.bias.data)
Example #30
    def __init__(self,
                 inputs_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 cross_num_layers: int,
                 output_size: int = 1,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize DeepAndCrossNetworkModel
        
        Args:
            inputs_size (int): inputs size of dense network and cross network, i.e. number of fields * embedding size
            num_fields (int): number of inputs' fields
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            cross_num_layers (int): number of layers of Cross Network
            output_size (int, optional): output size of model, i.e. output size of the projection layer. Defaults to 1
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.deep = DNNLayer(
            inputs_size=inputs_size,
            output_size=deep_output_size,
            layer_sizes=deep_layer_sizes,
            dropout_p=deep_dropout_p,
            activation=deep_activation
        )
        self.cross = CrossNetworkLayer(
            inputs_size=inputs_size,
            num_layers=cross_num_layers
        )
        cat_size = (deep_output_size + inputs_size) * num_fields
        self.fc = nn.Linear(cat_size, output_size)