Example #1
    def __init__(self,
                 embed_size       : int,
                 num_fields       : int,
                 deep_output_size : int,
                 deep_layer_sizes : List[int],
                 reduction        : int,
                 ffm_dropout_p    : float = 0.0,
                 deep_dropout_p   : Optional[List[float]] = None,
                 deep_activation  : Callable[[torch.Tensor], torch.Tensor] = nn.ReLU()):
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel, self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)
        
        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        
        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.deep = DNNLayer(
            output_size = deep_output_size,
            layer_sizes = deep_layer_sizes,
            embed_size  = embed_size,
            num_fields  = inputs_size,
            dropout_p   = deep_dropout_p,
            activation  = deep_activation
        )
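
These snippets lean on a few imports and a combination helper that are never shown. A minimal sketch of what they presuppose, assuming combination(n, k) is the binomial coefficient (the comments equate combination(num_fields, 2) with NC2):

# Sketch of the imports and helper the snippets presuppose; implementing
# combination with math.comb is an assumption, not the library's own code.
import math
from typing import Callable, List, Optional

import torch
import torch.nn as nn

def combination(n: int, k: int) -> int:
    # Binomial coefficient nCk, so combination(num_fields, 2) matches
    # the "NC2" shape annotations in the comments above.
    return math.comb(n, k)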
Example #2
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 dropout_p: float = 0.0):
        r"""Initialize FieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
        
        Attributes:
            ffm (nn.Module): Module of field-aware factorization machine layer.
            bias (nn.Parameter): Parameter of bias of field-aware factorization machine.
        """
        # refer to parent class
        super(FieldAwareFactorizationMachineModel, self).__init__()

        # initialize ffm layer
        self.ffm = FFMLayer(num_fields, dropout_p=dropout_p)

        # initialize bias parameter
        self.bias = nn.Parameter(torch.zeros((1, 1), names=("B", "O")))
        nn.init.uniform_(self.bias.data)
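
For orientation, a hypothetical instantiation of the class this __init__ belongs to (argument values are made up for illustration):

# Hypothetical usage; the values are illustrative, not taken from the library.
model = FieldAwareFactorizationMachineModel(
    embed_size=8,    # E: size of each embedding vector
    num_fields=10,   # N: number of input fields
    dropout_p=0.1    # dropout probability inside the FFM layer
)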
Example #3
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 ffm_dropout_p: Optional[float] = None,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize DeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            ffm_dropout_p (float, optional): probability of Dropout in FFM. Defaults to None.
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None.
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU().
        """
        super().__init__()

        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
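
To make the size computation concrete: with num_fields = 10 and embed_size = 8 (illustrative values), the FFM layer emits C(10, 2) = 45 field pairs, so the flattened input to DNNLayer has 45 * 8 = 360 features:

num_fields, embed_size = 10, 8            # illustrative values only
inputs_size = combination(num_fields, 2)  # C(10, 2) = 45 field pairs
inputs_size *= embed_size                 # 45 * 8 = 360 flattened features
assert inputs_size == 360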
Example #4
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 reduction: int,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        """Initialize FieldAttentiveDeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            reduction (int): Reduction ratio of the compose excitation network (CEN) layer
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cen (nn.Module): Module of compose excitation network layer.
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # refer to parent class
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel,
              self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
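
A hypothetical instantiation of this field-attentive variant (every value below is illustrative; the reduction ratio of 4 is a common squeeze-and-excitation choice, not a value taken from the library):

# Hypothetical usage; values are illustrative only.
model = FieldAttentiveDeepFieldAwareFactorizationMachineModel(
    embed_size=8,
    num_fields=10,
    deep_output_size=1,
    deep_layer_sizes=[64, 32],
    reduction=4,
    ffm_dropout_p=0.1,
    deep_dropout_p=[0.1, 0.1]
)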
Example #5
    def __init__(self,
                 num_fields: int,
                 dropout_p: Optional[float] = 0.0):
        """
        Initialize FieldAwareFactorizationMachineModel
        
        Args:
            num_fields (int): number of inputs' fields
            dropout_p (float, optional): probability of Dropout in FFM. Defaults to 0.0
        """
        super().__init__()

        self.ffm = FFMLayer(num_fields, dropout_p=dropout_p)
        self.bias = nn.Parameter(torch.zeros((1, 1,), names=('B', 'O',)))
        nn.init.uniform_(self.bias.data)
Example #6
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        r"""Initialize DeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            output_size (int, optional): Output size of model, 
                i.e. output size of the projection layer. 
                Defaults to 1.
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # Refer to parent class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # Initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # Calculate inputs' size of DNNLayer, i.e. output's size of ffm (= NC2) * embed_size
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # Initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
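
Note that output_size is documented as the size of a projection layer but is never consumed in the snippet above, so the projection is presumably built in a part of __init__ not reproduced here. Purely as an assumption, it might look like:

# Hypothetical projection layer consuming output_size; the snippet above
# does not show the real one, so this line is only a guess.
self.projection = nn.Linear(deep_output_size, output_size)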
Example #7
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 output_size: int = 1):
        r"""initialize Deep Field-aware Factorization Machine Model
        
        Args:
            embed_size (int): embedding size
            num_fields (int): number of fields in inputs
            deep_output_size (int): output size of deep neural network
            deep_layer_sizes (List[int]): layer sizes of deep neural network
            ffm_dropout_p (float, optional): dropout probability after the ffm layer. Defaults to 0.0.
            deep_dropout_p (List[float], optional): dropout probability after each layer of the deep neural network. Allow: [None, List[float]]. Defaults to None.
            deep_activation (Callable[[T], T], optional): activation function after each layer of the deep neural network. Allow: [None, Callable[[T], T]]. Defaults to nn.ReLU().
            output_size (int, optional): output size of the linear transformation after concatenation. Defaults to 1.
        """
        # initialize nn.Module class
        super(DeepFieldAwareFactorizationMachineModel, self).__init__()

        # sequential container for the second-order interaction part of the inputs
        self.second_order = nn.Sequential()
        # ffm's input shape = (B, N * N, E)
        # ffm's output shape = (B, NC2, E)
        self.second_order.add_module(
            "ffm", FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p))

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)

        # deep's input shape = (B, NC2, E)
        # deep's output shape = (B, 1, O)
        self.second_order.add_module(
            "deep",
            DNNLayer(output_size=deep_output_size,
                     layer_sizes=deep_layer_sizes,
                     embed_size=embed_size,
                     num_fields=inputs_size,
                     dropout_p=deep_dropout_p,
                     activation=deep_activation))
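
Given the shape annotations above, applying the container reduces the whole second-order branch to one call. A sketch, assuming both sub-layers accept a single tensor (their forward signatures are not shown here):

# Sketch only: field_emb (B, N * N, E) -> ffm (B, NC2, E) -> deep (B, 1, O)
second_order_output = self.second_order(field_emb)

Packing both layers into one nn.Sequential keeps the FFM-to-DNN composition as a single named submodule; Examples #3, #4, and #6 instead hold self.ffm and self.deep as separate attributes and would wire them together in forward.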