Code example #1
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 ffm_dropout_p: Optional[float] = None,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize DeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            ffm_dropout_p (float, optional): probability of Dropout in FFM. Defaults to None
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
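
A minimal construction sketch for the model above; the import path and argument values are assumptions, and only instantiation is shown since the forward input format is not part of this snippet.

    # Hypothetical usage sketch; values are illustrative only.
    import torch.nn as nn

    model = DeepFieldAwareFactorizationMachineModel(
        embed_size=8,
        num_fields=4,
        deep_output_size=1,
        deep_layer_sizes=[32, 16],
        ffm_dropout_p=0.1,
        deep_dropout_p=[0.1, 0.1],
        deep_activation=nn.ReLU())
    # The dense layer then receives C(4, 2) * 8 = 48 inputs, per the arithmetic above.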
Code example #2
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 bilinear_type: str = "all",
                 bias: bool = True):
        """Initialize BilinearInteractionLayer
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            bilinear_type (str, optional): Type of bilinear to calculate interactions. 
                Defaults to "all".
            bias (bool, optional): Flag to control using bias. 
                Defaults to True.
        
        Attributes:
            row_idx (T), dtype = torch.long: 1st indices to index inputs in the 2nd dimension for the bilinear interaction.
            col_idx (T), dtype = torch.long: 2nd indices to index inputs in the 2nd dimension for the bilinear interaction.
            bilinear (nn.Module): Module of bilinear-interaction.
            bilinear_type (str): Type of bilinear to calculate interactions.
        
        Raises:
            NotImplementedError: when bilinear_type is "interaction", which is not implemented yet
            ValueError: when bilinear_type is not in ["all", "each", "interaction"]
        """
        # Refer to parent class
        super(BilinearInteractionLayer, self).__init__()

        # Create row_idx and col_idx to index input pairs for the bilinear interaction
        self.row_idx = list()
        self.col_idx = list()
        for i in range(num_fields - 1):
            for j in range(i + 1, num_fields):
                self.row_idx.append(i)
                self.col_idx.append(j)
        self.row_idx = torch.LongTensor(self.row_idx)
        self.col_idx = torch.LongTensor(self.col_idx)

        # Calculate the number of interacting field pairs
        num_interaction = combination(num_fields, 2)

        if bilinear_type == "all":
            self.bilinear = FieldAllTypeBilinear(embed_size,
                                                 embed_size,
                                                 bias=bias)
        elif bilinear_type == "each":
            self.bilinear = FieldEachTypeBilinear(num_interaction,
                                                  embed_size,
                                                  embed_size,
                                                  bias=bias)
        elif bilinear_type == "interaction":
            raise NotImplementedError()
            # self.bilinear = FieldInteractionTypeBilinear(num_interaction, embed_size, embed_size, bias=bias)
        else:
            raise ValueError(
                'bilinear_type only allows: ["all", "each", "interaction"].')

        # Bind bilinear_type to bilinear_type
        self.bilinear_type = bilinear_type
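
A small self-contained sketch of the pair enumeration above, assuming `combination` behaves like `math.comb`; it shows that `row_idx`/`col_idx` enumerate every unordered field pair exactly once:

    from math import comb

    num_fields = 4
    pairs = [(i, j) for i in range(num_fields - 1)
             for j in range(i + 1, num_fields)]
    print(pairs)                 # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
    print(len(pairs))            # 6
    print(comb(num_fields, 2))   # 6, matching num_interaction above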
Code example #3
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 prod_method: str = 'inner',
                 use_bias: Optional[bool] = True,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU(),
                 **kwargs):
        """
        Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            deep_layer_sizes (List[int]): layer sizes of dense network
            output_size (int): output size of model, i.e. output size of dense network. Defaults to 1
            prod_method (str): method of product neural network. Allow: ["inner", "outer"]. Defaults to "inner"
            use_bias (bool, optional): whether the bias constant is concatenated to the input. Defaults to True
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        
        Keyword Args:
            kernel_type (str): type of kernel to compress the outer product. Defaults to 'mat'
        """
        super().__init__()

        if prod_method == 'inner':
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == 'outer':
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    'kernel_type', 'mat'))
        else:
            raise ValueError(
                f'{prod_method} is not allowed in prod_method. Required: ["inner", "outer"].'
            )

        self.use_bias = use_bias

        cat_size = combination(num_fields, 2) + num_fields
        if self.use_bias:
            cat_size += 1
        self.deep = DNNLayer(output_size=output_size,
                             layer_sizes=deep_layer_sizes,
                             inputs_size=cat_size,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1, 1), names=('B', 'O')))
            nn.init.uniform_(self.bias.data)
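
To make the `cat_size` arithmetic concrete: the product network contributes C(num_fields, 2) pairwise terms, the linear part contributes num_fields, and the optional bias adds one more input. A quick numeric check, assuming `combination` equals `math.comb`:

    from math import comb

    num_fields, use_bias = 4, True
    cat_size = comb(num_fields, 2) + num_fields   # 6 + 4 = 10
    if use_bias:
        cat_size += 1                             # 11 inputs to DNNLayer
    print(cat_size)  # 11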
Code example #4
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 reduction: int,
                 ffm_dropout_p: float = 0.0,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU()):
        """Initialize FieldAttentiveDeepFieldAwareFactorizationMachineModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_output_size (int): Output size of dense network
            deep_layer_sizes (List[int]): Layer sizes of dense network
            reduction (int): Reduction of CIN layer
            ffm_dropout_p (float, optional): Probability of Dropout in FFM. 
                Defaults to 0.0.
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Attributes:
            cen (nn.Module): Module of compose excitation network layer.
            ffm (nn.Module): Module of field-aware factorization machine layer.
            deep (nn.Module): Module of dense layer.
        """
        # refer to parent class
        super(FieldAttentiveDeepFieldAwareFactorizationMachineModel,
              self).__init__()

        # initialize compose excitation network
        self.cen = CENLayer(num_fields, reduction)

        # initialize ffm layer
        self.ffm = FFMLayer(num_fields=num_fields, dropout_p=ffm_dropout_p)

        # calculate the output's size of ffm, i.e. inputs' size of DNNLayer
        inputs_size = combination(num_fields, 2)
        inputs_size *= embed_size

        # initialize dense layer
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
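
A construction sketch for the attentive variant; the import path and argument values are assumptions:

    # Hypothetical usage sketch; values are illustrative only.
    import torch.nn as nn

    model = FieldAttentiveDeepFieldAwareFactorizationMachineModel(
        embed_size=8,
        num_fields=4,
        deep_output_size=1,
        deep_layer_sizes=[32, 16],
        reduction=2,              # squeeze ratio of the CEN layer
        ffm_dropout_p=0.0,
        deep_dropout_p=[0.1, 0.1],
        deep_activation=nn.ReLU())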
Code example #5
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 bilinear_type: str = 'all',
                 bias: bool = True):
        """
        Initialize BilinearInteractionLayer
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to "all"
            bias (bool, optional): flag to control using bias. Defaults to True
        
        Raises:
            NotImplementedError: when bilinear_type is "interaction", which is not implemented yet
            ValueError: when bilinear_type is not in ["all", "each", "interaction"]
        """
        super().__init__()

        self.row_idx = []
        self.col_idx = []
        for i in range(num_fields - 1):
            for j in range(i + 1, num_fields):
                self.row_idx.append(i)
                self.col_idx.append(j)
        self.row_idx = torch.LongTensor(self.row_idx)
        self.col_idx = torch.LongTensor(self.col_idx)

        num_interaction = combination(num_fields, 2)

        self.bilinear_type = bilinear_type
        if bilinear_type == 'all':
            self.bilinear = FieldAllTypeBilinear(embed_size,
                                                 embed_size,
                                                 bias=bias)
        elif bilinear_type == 'each':
            self.bilinear = FieldEachTypeBilinear(num_interaction,
                                                  embed_size,
                                                  embed_size,
                                                  bias=bias)
        elif bilinear_type == 'interaction':
            # self.bilinear = FieldInteractionTypeBilinear(num_interaction, embed_size, embed_size, bias=bias)
            raise NotImplementedError()
        else:
            raise ValueError(
                'bilinear_type only allows: ["all", "each", "interaction"].')
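
One design note on the index tensors above: assigned as plain attributes, `row_idx` and `col_idx` will not follow the module when it is moved with `.to(device)`. A hedged alternative sketch using `register_buffer` (standard PyTorch; not necessarily what the original library does):

    import torch
    import torch.nn as nn

    class PairIndex(nn.Module):
        """Minimal sketch (hypothetical class): buffers move with .to(device)."""

        def __init__(self, num_fields: int):
            super().__init__()
            row_idx, col_idx = [], []
            for i in range(num_fields - 1):
                for j in range(i + 1, num_fields):
                    row_idx.append(i)
                    col_idx.append(j)
            self.register_buffer('row_idx', torch.LongTensor(row_idx))
            self.register_buffer('col_idx', torch.LongTensor(col_idx))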
Code example #6
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 senet_reduction: int,
                 deep_output_size: int,
                 deep_layer_sizes: List[int],
                 bilinear_type: Optional[str] = 'all',
                 bilinear_bias: Optional[bool] = True,
                 deep_dropout_p: Optional[List[float]] = None,
                 deep_activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork
        
        Args:
            embed_size (int): size of embedding tensor
            num_fields (int): number of inputs' fields
            senet_reduction (int): size of reduction in the dense layer of SENET
            deep_output_size (int): output size of dense network
            deep_layer_sizes (List[int]): layer sizes of dense network
            bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to "all"
            bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True
            deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None
            deep_activation (torch.nn.Module, optional): activation function of dense network. Defaults to nn.ReLU()
        """
        super().__init__()

        inputs_size = combination(num_fields, 2)
        inputs_size = inputs_size * embed_size * 2

        self.senet = SENETLayer(num_fields, senet_reduction, squared=False)
        self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                     bilinear_type,
                                                     bilinear_bias)
        self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields,
                                                       bilinear_type,
                                                       bilinear_bias)
        self.deep = DNNLayer(inputs_size=inputs_size,
                             output_size=deep_output_size,
                             layer_sizes=deep_layer_sizes,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)
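
The factor of 2 in `inputs_size` reflects the two bilinear branches, one over the raw embeddings and one over the SENET-reweighted embeddings, being concatenated before the dense layer. A quick check, assuming `combination` equals `math.comb`:

    from math import comb

    embed_size, num_fields = 8, 4
    inputs_size = comb(num_fields, 2) * embed_size * 2
    print(inputs_size)  # 6 * 8 * 2 = 96 inputs to DNNLayer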
Code example #7
    def __init__(self,
                 embed_size: int,
                 num_fields: int,
                 deep_layer_sizes: List[int],
                 output_size: int = 1,
                 prod_method: str = "inner",
                 deep_dropout_p: List[float] = None,
                 deep_activation: Callable[[torch.Tensor],
                                           torch.Tensor] = nn.ReLU(),
                 **kwargs):
        r"""Initialize ProductNeuralNetworkModel
        
        Args:
            embed_size (int): Size of embedding tensor
            num_fields (int): Number of inputs' fields
            deep_layer_sizes (List[int]): Layer sizes of dense network
            output_size (int): Output size of model
                i.e. output size of dense network. 
                Defaults to 1.
            prod_method (str): Method of product neural network.
                Allow: ["inner", "outer"].
                Defaults to "inner".
            deep_dropout_p (List[float], optional): Probability of Dropout in dense network. 
                Defaults to None.
            deep_activation (Callable[[T], T], optional): Activation function of dense network. 
                Defaults to nn.ReLU().
        
        Keyword Args:
            kernel_type (str): Type of kernel to compress the outer product.
                Defaults to "mat".
        
        Attributes:
            pnn (nn.Module): Module of product neural network.
            deep (nn.Module): Module of dense layer.
            bias (nn.Parameter): Parameter of the bias constant concatenated to the inputs of the dense layer.

        Raises:
            ValueError: when prod_method is not in ["inner", "outer"].
        """
        # Refer to parent class
        super(ProductNeuralNetworkModel, self).__init__()

        # Initialize product network
        if prod_method == "inner":
            self.pnn = InnerProductNetworkLayer(num_fields=num_fields)
        elif prod_method == "outer":
            self.pnn = OuterProductNetworkLayer(embed_size=embed_size,
                                                num_fields=num_fields,
                                                kernel_type=kwargs.get(
                                                    "kernel_type", "mat"))
        else:
            raise ValueError(
                f"'{prod_method}' is not allowed in prod_method. Please use ['inner', 'outer'].")

        # Calculate size of inputs of dense layer
        cat_size = combination(num_fields, 2) + num_fields + 1

        # Initialize dense layer
        self.deep = DNNLayer(output_size=output_size,
                             layer_sizes=deep_layer_sizes,
                             inputs_size=cat_size,
                             dropout_p=deep_dropout_p,
                             activation=deep_activation)

        # Initialize bias parameter
        self.bias = nn.Parameter(torch.zeros((1, 1), names=("B", "O")))
        nn.init.uniform_(self.bias.data)
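
A construction sketch exercising the `**kwargs` path above; `kernel_type="mat"` mirrors the default used in the `kwargs.get` call, and the import path is an assumption:

    # Hypothetical usage sketch; values are illustrative only.
    import torch.nn as nn

    model = ProductNeuralNetworkModel(
        embed_size=8,
        num_fields=4,
        deep_layer_sizes=[32, 16],
        output_size=1,
        prod_method="outer",
        deep_dropout_p=[0.1, 0.1],
        deep_activation=nn.ReLU(),
        kernel_type="mat")  # forwarded to OuterProductNetworkLayer via kwargs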