Code example #1
File: fat_deep_ffm.py  Project: codeants2012/torecsys
    def __init__(self,
                 embed_size       : int,
                 num_fields       : int,
                 deep_output_size : int,
                 deep_layer_sizes : List[int],
                 reduction        : int, 
                 ffm_dropout_p    : float = 0.0,
                 deep_dropout_p   : List[float] = None,
                 deep_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel, self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)
        
        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        
        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.deep = DNNLayer(
            output_size = deep_output_size,
            layer_sizes = deep_layer_sizes,
            embed_size  = embed_size,
            num_fields  = inputs_size,
            dropout_p   = deep_dropout_p,
            activation  = deep_activation
        )
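
A minimal instantiation sketch for the constructor above. The argument values are illustrative only, and importing the class from the torecsys package is an assumption based on the project name shown:

# Hedged usage sketch; the import path is assumed, not confirmed by this page.
# from torecsys.models.ctr import FieldAttentiveDeepFieldAwareFactorizationMachineModel
model = FieldAttentiveDeepFieldAwareFactorizationMachineModel(
    embed_size=8,               # E
    num_fields=4,               # N
    deep_output_size=1,         # O
    deep_layer_sizes=[64, 32],
    reduction=2,                # reduction ratio of the CEN layer
    ffm_dropout_p=0.1,
    deep_dropout_p=[0.1, 0.1])

Code example #2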
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 senet_reduction: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 bilinear_type: str = "all",
                 bilinear_bias: bool = True,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            senet_reduction (int): Size of reduction in dense layer of senet.
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            bilinear_type (str, optional): Type of bilinear to calculate interactions.
                Defaults to "all".
            bilinear_bias (bool, optional): Flag to control using bias in bilinear-interactions.
                Defaults to True.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network.
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network.
                Defaults to nn.ReLU().
        
        Attributes:
            senet (nn.Module): Module of Squeeze-and-Excitation Network layer.
            bilinear (nn.Module): Module of Bilinear-interaction layer.
            deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(FeatureImportanceAndBilinearFeatureInteractionNetwork,
              self).__init__()

        # Initialize senet layer
        self.senet = SENETLayer(num_fields, senet_reduction)

        # Initialize bilinear interaction layer
        self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                     bilinear_type,
                                                     bilinear_bias)
        self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                       bilinear_type,
                                                       bilinear_bias)

        # Calculate inputs' size of DNNLayer, i.e. the concatenated outputs' size
        # of the two bilinear interaction layers (= NC2 * embed_size * 2)
        inputs_size = combination(num_fields, 2)
        inputs_size = inputs_size * embed_size * 2

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
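
A hedged instantiation sketch for the constructor above; the argument values are illustrative and the import from the torecsys package is assumed:

# Illustrative instantiation; import path not confirmed by this page.
fibinet = FeatureImportanceAndBilinearFeatureInteractionNetwork(
    embed_size=8,
    num_fields=4,
    senet_reduction=2,
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    bilinear_type="all",   # per the docstring default
    bilinear_bias=True)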
Code example #3
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 output_size: int,
                 prod_method: str,
                 deep_layer_sizes: List[int],
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 **kwargs):
        r"""Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            output_size (int): Output size of model
            prod_method (str): Method of product neural network. 
                Allow: [inner, outer].
            deep_layer_sizes (List[int]): Layer sizes of DNN
            deep_dropout_p (List[float], optional): Probability of Dropout in DNN. 
                Allow: [None, list of float for each layer]. 
                Defaults to None.
            deep_activation (Callable[[torch.Tensor], torch.Tensor], optional): Activation function of Linear. 
                Allow: [None, Callable[[T], T]]. 
                Defaults to nn.ReLU().
        
        Raises:
            ValueError: when prod_method is not in [inner, outer].
        """
        # refer to parent class
        super(ProductNeuralNetworkModel, self).__init__()

        # initialize product network
        if prod_method == "inner":
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == "outer":
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    "kernel_type", "mat"))
        else:
            raise ValueError(
                "%s is not allowed in prod_method. Please use ['inner', 'outer']."
                % prod_method)

        # calculate size of inputs of DNNLayer
        cat_size = 1 + num_fields + combination(num_fields, 2)

        # initialize dnn layer
        self.dnn = DNNLayer(output_size=output_size,
                            layer_sizes=deep_layer_sizes,
                            inputs_size=cat_size,
                            dropout_p=deep_dropout_p,
                            activation=deep_activation)

        # initialize bias variable
        self.bias = nn.Parameter(torch.zeros(1))
        nn.init.uniform_(self.bias.data)
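
A hedged instantiation sketch of this constructor; the values are illustrative, and note that kernel_type is only consumed when prod_method == "outer" (it is forwarded to OuterProductNetworkLayer through **kwargs):

# Illustrative only; importing ProductNeuralNetworkModel from torecsys is assumed.
ipnn = ProductNeuralNetworkModel(
    embed_size=8, num_fields=4, output_size=1,
    prod_method="inner", deep_layer_sizes=[64, 32])

opnn = ProductNeuralNetworkModel(
    embed_size=8, num_fields=4, output_size=1,
    prod_method="outer", deep_layer_sizes=[64, 32],
    kernel_type="mat")  # forwarded through **kwargs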
Code example #4
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 reduction: int,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        """Initialize FieldAttentiveDeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            reduction (int): Reduction size of the compose excitation network (CEN) layer
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cen (nn.Module): Module of compose excitation network layer.
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # refer to parent class
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel,
              self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
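
As a quick sanity check of the size computation above: with num_fields = 4 and embed_size = 8, the FFM layer emits C(4, 2) = 6 pair vectors, so the dense layer receives 6 * 8 = 48 inputs. A self-contained check (the values here are illustrative):

# Worked size check for inputs_size = combination(num_fields, 2) * embed_size
from math import comb

num_fields, embed_size = 4, 8
inputs_size = comb(num_fields, 2) * embed_size
print(inputs_size)  # 48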
Code example #5
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize DeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            output_size (int, optional): Output size of model, 
                i.e. output size of the projection layer. 
                Defaults to 1.
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # Initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # Calculate inputs' size of DNNLayer, i.e. output's size of ffm (= NC2) * embed_size
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
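
The combination helper used throughout these snippets is the binomial coefficient (the NC2 in the comments). A minimal stand-in, assuming the project's helper behaves like math.comb:

from math import comb

def combination(n: int, r: int) -> int:
    # Stand-in for torecsys's helper: number of ways to choose r of n fields.
    return comb(n, r)

assert combination(4, 2) == 6  # 6 pairwise interactions for 4 fields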
Code example #6
File: deep_ffm.py  Project: codeants2012/torecsys
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 output_size: int = 1):
        r"""initialize Deep Field-aware Factorization Machine Model
        
        Args:
            embed_size (int): embedding size
            num_fields (int): number of fields in inputs
            deep_output_size (int): output size of deep neural network
            deep_layer_sizes (List[int]): layer sizes of deep neural network
            ffm_dropout_p (float, optional): dropout probability after ffm layer. Defaults to 0.0.
            deep_dropout_p (List[float], optional): dropout probability after each deep neural network. Allow: [None, List[float]]. Defaults to None.
            deep_activation (Callable[[T], T], optional): activation after each deep neural network. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
            output_size (int, optional): output size of linear transformation after concatenate. Defaults to 1.
        """
        # initialize nn.Module class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # sequential of second-order part in inputs
        self.second_order = nn.Sequential()
        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.second_order.add_module(
            "ffm", FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p))

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)

        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.second_order.add_module(
            "deep",
            DNNLayer(output_size=deep_output_size,
                     layer_sizes=deep_layer_sizes,
                     embed_size=embed_size,
                     num_fields=inputs_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))
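
The shape comments above ((B, N * N, E) in, (B, NC2, E) out) follow from FFM pairing each ordered field-aware embedding with its counterpart. The sketch below is not the torecsys FFMLayer; it only illustrates that shape convention, assuming the standard FFM pairwise term v_{i,F(j)} * v_{j,F(i)}:

# Shape-convention sketch only; not the actual FFMLayer implementation.
import torch
from itertools import combinations

B, N, E = 2, 4, 8
field_aware = torch.randn(B, N * N, E)      # embedding of field i as seen by field j
grid = field_aware.view(B, N, N, E)
pairs = [grid[:, i, j] * grid[:, j, i]      # elementwise field-aware interaction of (i, j)
         for i, j in combinations(range(N), 2)]
out = torch.stack(pairs, dim=1)
assert out.shape == (B, 6, E)               # (B, NC2, E) with NC2 = C(4, 2) = 6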
Code example #7
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 prod_method: str = "inner",
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 **kwargs):
        r"""Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_layer_sizes (List[int]): Layer sizes of dense network
            output_size (int, optional): Output size of model,
                i.e. output size of dense network.
                Defaults to 1.
            prod_method (str): Method of product neural network. 
                Allow: [inner, outer].
                Defaults to inner.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Arguments:
            kernel_type (str): Type of kernel to compress outer-product.
        
        Attributes:
            pnn (nn.Module): Module of product neural network.
            deep (nn.Module): Module of dense layer.
            bias (nn.Parameter): Parameter of the output bias of the model.

        Raises:
            ValueError: when prod_method is not in [inner, outer].
        """
        # Refer to parent class
        super(ProductNeuralNetworkModel, self).__init__()

        # Initialize product network
        if prod_method == "inner":
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == "outer":
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    "kernel_type", "mat"))
        else:
            raise ValueError(
                "'%s' is not allowed in prod_method. Please use ['inner', 'outer']."
                % prod_method)

        # Calculate size of inputs of dense layer
        cat_size = combination(num_fields, 2) + num_fields + 1

        # Initialize dense layer
        self.deep = DNNLayer(output_size=output_size,
                             layer_sizes=deep_layer_sizes,
                             inputs_size=cat_size,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        # Initialize bias parameter
        self.bias = nn.Parameter(torch.zeros((1, 1), names=("B", "O")))
        nn.init.uniform_(self.bias.data)
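
A quick numeric check of cat_size for the dense layer's input; the grouping of the three terms (pairwise product terms, per-field terms, plus one extra unit) is an interpretation read off the formula, not confirmed by the source:

# cat_size = C(N, 2) + N + 1, e.g. 6 + 4 + 1 = 11 for four fields
from math import comb

num_fields = 4
cat_size = comb(num_fields, 2) + num_fields + 1
print(cat_size)  # 11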