Example no. 1
# Shared imports for all four listings. `schema` refers to the project's
# schema-utilities module (its import path is not shown in the source);
# the marshmallow_dataclass decorator is assumed from the field and Meta usage.
from typing import Any, Dict, List, Optional, Union

from marshmallow import INCLUDE
from marshmallow_dataclass import dataclass


@dataclass
class TabNetCombinerConfig:
    size: int = schema.PositiveInteger(default=32)  # N_a in the paper
    output_size: int = schema.PositiveInteger(default=32)  # N_d in the paper
    num_steps: int = schema.NonNegativeInteger(default=1)  # N_steps in the paper
    num_total_blocks: int = schema.NonNegativeInteger(default=4)
    num_shared_blocks: int = schema.NonNegativeInteger(default=2)
    relaxation_factor: float = 1.5  # gamma in the paper
    bn_epsilon: float = 1e-3
    bn_momentum: float = 0.7  # m_B in the paper
    # B_v from the paper
    bn_virtual_bs: Optional[int] = schema.PositiveInteger()
    sparsity: float = 1e-5  # lambda_sparse in the paper
    dropout: float = schema.FloatRange(default=0.0, min=0, max=1)

    class Meta:
        unknown = INCLUDE
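
The `Meta.unknown = INCLUDE` setting tells marshmallow to keep keys that are not declared on the schema instead of rejecting them. A minimal usage sketch, assuming the `@dataclass` decorator shown above and that the `schema` helpers produce dataclass fields carrying marshmallow metadata, so the generated `.Schema()` validates input and fills in defaults:

# Usage sketch (assumptions noted above); values are illustrative only.
config = TabNetCombinerConfig.Schema().load({"size": 64, "num_steps": 3})
print(config.size)               # 64
print(config.relaxation_factor)  # 1.5, taken from the declared default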
Example no. 2
@dataclass  # same imports and decorator as in Example no. 1
class TransformerCombinerConfig:
    num_layers: int = schema.PositiveInteger(default=1)
    hidden_size: int = schema.NonNegativeInteger(default=256)
    num_heads: int = schema.NonNegativeInteger(default=8)
    transformer_fc_size: int = schema.NonNegativeInteger(default=256)
    dropout: float = schema.FloatRange(default=0.1, min=0, max=1)
    fc_layers: Optional[List[Dict[str, Any]]] = schema.DictList()
    num_fc_layers: int = schema.NonNegativeInteger(default=0)
    fc_size: int = schema.PositiveInteger(default=256)
    use_bias: bool = True
    weights_initializer: Union[str, Dict] = schema.InitializerOrDict(default="xavier_uniform")
    bias_initializer: Union[str, Dict] = schema.InitializerOrDict(default="zeros")
    norm: Optional[str] = schema.StringOptions(["batch", "layer"])
    norm_params: Optional[dict] = schema.Dict()
    fc_activation: str = "relu"
    fc_dropout: float = schema.FloatRange(default=0.0, min=0, max=1)
    fc_residual: bool = False
    reduce_output: Optional[str] = schema.ReductionOptions(default="mean")

    class Meta:
        unknown = INCLUDE
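
Since the decorated class behaves like an ordinary dataclass, it can also be constructed directly with keyword overrides; note that direct construction skips marshmallow validation, so constraints such as the `StringOptions(["batch", "layer"])` check on `norm` only apply when loading through the generated schema. A short sketch with illustrative values:

# Direct-construction sketch (assumes plain-dataclass behavior once decorated).
transformer_config = TransformerCombinerConfig(
    num_layers=2,
    hidden_size=512,  # for multi-head attention this should stay divisible by num_heads
    num_heads=8,
    norm="layer",     # one of the two allowed StringOptions values
)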
Example no. 3
@dataclass  # same imports and decorator as in Example no. 1
class TabNetCombinerConfig:
    size: int = schema.PositiveInteger(default=32)  # N_a in the paper
    output_size: int = schema.PositiveInteger(default=32)  # N_d in the paper
    num_steps: int = schema.NonNegativeInteger(default=1)  # N_steps in the paper
    num_total_blocks: int = schema.NonNegativeInteger(default=4)
    num_shared_blocks: int = schema.NonNegativeInteger(default=2)
    relaxation_factor: float = 1.5  # gamma in the paper
    bn_epsilon: float = 1e-3
    bn_momentum: float = 0.7  # m_B in the paper
    # B_v from the paper
    bn_virtual_bs: Optional[int] = schema.PositiveInteger()
    sparsity: float = 1e-5  # lambda_sparse in the paper
    entmax_mode: str = schema.StringOptions(
        ["entmax15", "sparsemax", "constant", "adaptive"], default="sparsemax")
    entmax_alpha: float = schema.FloatRange(
        default=1.5, min=1, max=2)  # 1 corresponds to softmax, 2 is sparsemax.
    dropout: float = schema.FloatRange(default=0.0, min=0, max=1)

    class Meta:
        unknown = INCLUDE
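
Compared with Example no. 1, this variant adds the entmax fields: `entmax_alpha` interpolates between softmax (alpha = 1) and sparsemax (alpha = 2), and `FloatRange(min=1, max=2)` rejects values outside that interval at load time. A sketch of the failure path, under the same decorator assumption as above:

# Range-validation sketch; assumes FloatRange attaches a marshmallow Range validator.
from marshmallow import ValidationError

try:
    TabNetCombinerConfig.Schema().load({"entmax_alpha": 2.5})  # outside the [1, 2] range
except ValidationError as err:
    print(err.messages)  # reports the offending field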
Example no. 4
@dataclass  # same imports and decorator as in Example no. 1
class ConcatCombinerConfig:
    fc_layers: Optional[List[Dict[str, Any]]] = schema.DictList()
    num_fc_layers: int = schema.NonNegativeInteger(default=0)
    fc_size: int = schema.PositiveInteger(default=256)
    use_bias: bool = True
    weights_initializer: Union[str, Dict] = schema.InitializerOrDict(default="xavier_uniform")
    bias_initializer: Union[str, Dict] = schema.InitializerOrDict(default="zeros")
    norm: Optional[str] = schema.StringOptions(["batch", "layer"])
    norm_params: Optional[dict] = schema.Dict()
    activation: str = "relu"
    dropout: float = schema.FloatRange(default=0.0, min=0, max=1)
    flatten_inputs: bool = False
    residual: bool = False

    class Meta:
        unknown = INCLUDE
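
`fc_layers` accepts a list of per-layer dictionaries for non-uniform stacks, while `num_fc_layers` and `fc_size` describe a uniform one. A sketch of the per-layer form; the keys inside each dict (`fc_size`, `activation`, `dropout`) are assumptions, not taken from the listing:

# Per-layer override sketch (assumed dict keys, illustrative values).
concat_config = ConcatCombinerConfig(
    fc_layers=[
        {"fc_size": 512, "activation": "relu"},
        {"fc_size": 256, "dropout": 0.2},
    ],
    flatten_inputs=True,
)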