Example #1
    def __init__(self, config):
        super(MRR_in_N, self).__init__(config)
        assert self.AN <= self.N
        self.name = "MRR_in_{}".format(self.AN)
        logger.info(
            utils.generate_module_info(self.name, "N", self.N, "AN", self.AN,
                                       "skip", self.skip))
Example #2
    def __init__(self, config):
        super(KLDivLoss, self).__init__()
        self.size_average = config[
            "size_average"] if "size_average" in config else False
        self.reduce = config["reduce"] if "reduce" in config else True
        self.reduction = config[
            "reduction"] if "reduction" in config else "elementwise_mean"

        # parameters for smoothing label
        self.do_label_smoothing = config[
            "do_label_smoothing"] if "do_label_smoothing" in config else False
        self.ratio = config["ratio"] if "ratio" in config else 2
        self.epsilon = config["epsilon"] if "epsilon" in config else 0.1

        self.loss_module = torch.nn.KLDivLoss(size_average=self.size_average,
                                              reduce=self.reduce,
                                              reduction=self.reduction)

        self.name = config[
            "name"] if "name" in config else "KL Divergence Loss"
        logger.info(
            utils.generate_module_info(self.name, "size_average",
                                       self.size_average, "reduce",
                                       self.reduce, "reduction",
                                       self.reduction))
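
A minimal usage sketch for this wrapper, assuming the surrounding KLDivLoss class and its dependencies (torch, logger, utils) are importable; the configuration values below are illustrative, not taken from the repository. Note that every line of the form config["key"] if "key" in config else default is equivalent to config.get("key", default).

config = {
    "reduction": "elementwise_mean",
    "do_label_smoothing": True,   # enable label smoothing with the two parameters below
    "ratio": 2,
    "epsilon": 0.1,
}
loss_fn = KLDivLoss(config)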
Example #3
    def __init__(self, config):
        super(AttentiveModule, self).__init__()
        assert "x_dim" in config and "y_dim" in config
        # Attention layer
        attention_config = deepcopy(config)
        attention_config["name"] = "Attention"
        self.attention = Attention(attention_config)

        self.is_layer_norm = config[
            "is_layer_norm"] if "is_layer_norm" in config else True
        if self.is_layer_norm:
            # Attention layer norm
            self.attention_layer_norm = nn.LayerNorm([config["y_dim"]],
                                                     eps=1e-6)
            # self.attention_layer_norm = LayerNorm(
            #     {"name": "Attention_layer_norm", "parameter_shape": [config["x_dim"]], "axis": [-1]})

            # FFN layer norm
            self.ffn_layer_norm = nn.LayerNorm([config["y_dim"]], eps=1e-6)
            # self.ffn_layer_norm = LayerNorm(
            #     {"name": "FFN_layer_norm", "parameter_shape": [config["x_dim"]], "axis": [-1]})

        self.ffn = FFN({
            "name": "FFN",
            "input_dim": config["y_dim"],
            "out_dim_0": config["y_dim"],
            "out_dim_1": config["y_dim"]
        })

        self.name = config["name"] if "name" in config else "AttentiveModule"
        logger.info(
            utils.generate_module_info(self.name, "is_layer_norm",
                                       self.is_layer_norm))
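
An illustrative configuration for this module; the dimension values are assumptions chosen for the example, not repository defaults.

attentive_config = {
    "x_dim": 200,           # dimension of the first input
    "y_dim": 200,           # dimension of the second input; also sizes the LayerNorm and FFN
    "is_layer_norm": True,
}
module = AttentiveModule(attentive_config)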
Example #4
    def __init__(self, config):
        super(Recall_N_at_K, self).__init__(config)
        self.K = self.config["k"] if "k" in self.config else 1
        assert self.AN >= self.K and self.AN <= self.N
        self.name = "R{}@{}".format(self.AN, self.K)
        logger.info(
            utils.generate_module_info(self.name, "N", self.N, "AN", self.AN,
                                       "K", self.K, "skip", self.skip))
Example #5
    def __init__(self, config):
        super(MarginRankingLoss, self).__init__()
        self.margin = config["margin"] if "margin" in config else 0.0
        self.reduction = config[
            "reduction"] if "reduction" in config else "elementwise_mean"

        self.sigmoid = torch.nn.Sigmoid()
        self.loss_module = torch.nn.MarginRankingLoss(margin=self.margin,
                                                      reduction=self.reduction)
        self.name = config[
            "name"] if "name" in config else "Margin Ranking Loss"
        logger.info(
            utils.generate_module_info(self.name, "margin", self.margin,
                                       "reduction", self.reduction))
Example #6
    def __init__(self, config):
        super(LayerNorm, self).__init__()
        assert "parameter_shape" in config and isinstance(
            config["parameter_shape"], list or tuple)
        self.parameter_shape = config["parameter_shape"]
        assert "axis" in config and isinstance(config["axis"], list or tuple)
        self.axis = config["axis"]
        self.scale = nn.Parameter(torch.ones(self.parameter_shape))
        self.bias = nn.Parameter(torch.zeros(self.parameter_shape))

        self.name = config["name"] if "name" in config else "Layer Norm"
        logger.info(
            utils.generate_module_info(self.name, "parameter_shape",
                                       self.parameter_shape, "axis",
                                       self.axis))
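
Only the constructor is shown above; a plausible forward pass for a layer norm parameterized by parameter_shape and axis (an assumption inferred from the declared parameters, not the repository's actual code) could look like this:

    def forward(self, x):
        # Hypothetical forward: normalize over the configured axes,
        # then apply the learned scale and bias.
        mean = x.mean(dim=self.axis, keepdim=True)
        std = x.std(dim=self.axis, keepdim=True)
        return self.scale * (x - mean) / (std + 1e-6) + self.bias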
Example #7
    def __init__(self, config):
        # lr_scheduler
        if "lr_scheduler" in config["params"]:
            self.lr_scheduler = LrScheduler(config["params"]["lr_scheduler"])
            config["params"].pop("lr_scheduler")
        else:
            self.lr_scheduler = LrScheduler()
        # optimizer
        self.optimizer = utils.name2function(config["function"])
        self.optimizer_params = config["params"]
        if isclass(self.optimizer) and issubclass(self.optimizer,
                                                  torch.optim.Optimizer):
            self.name = config["function"].strip().rsplit('.', maxsplit=1)[-1]
            logger.info(
                utils.generate_module_info(self.name, **self.optimizer_params))
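
The constructor above only resolves the optimizer callable from config["function"]; binding it to model parameters is not shown. A hedged sketch of one plausible way to use it (the wrapper class name Optimizer and the model variable are placeholders, not names confirmed by the repository):

optimizer_config = {
    "function": "torch.optim.Adam",
    "params": {"lr": 1e-3, "weight_decay": 0.0},
}
wrapper = Optimizer(optimizer_config)
optim = wrapper.optimizer(model.parameters(), **wrapper.optimizer_params)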
Example #8
    def __init__(self, config):
        super(Attention, self).__init__()
        assert "x_dim" in config and "y_dim" in config
        self.x_dim = config["x_dim"]
        self.y_dim = config["y_dim"]
        self.drop_prob = config["drop_prob"] if "drop_prob" in config else None
        self.bilinear_matrix = nn.Linear(in_features=self.x_dim,
                                         out_features=self.y_dim,
                                         bias=False)
        if self.drop_prob is not None:
            self.dropout = nn.Dropout(p=self.drop_prob)

        self.name = config["name"] if "name" in config else "Attention"
        logger.info(
            utils.generate_module_info(self.name, "x_dim", self.x_dim, "y_dim",
                                       self.y_dim, "drop_prob",
                                       self.drop_prob))
Example #9
def SGDOptimizer(params, **config):
    lr = config["lr"] if "lr" in config else 0.001
    momentum = config["momentum"] if "momentum" in config else 0
    weight_decay = config["weight_decay"] if "weight_decay" in config else 0
    dampening = config["dampening"] if "dampening" in config else 0
    nesterov = config["nesterov"] if "nesterov" in config else False

    name = "SGD Optimizer"
    logger.info(
        utils.generate_module_info(name, "lr", lr, "momentum", momentum,
                                   "weight_decay", weight_decay, "dampening",
                                   dampening, "nesterov", nesterov))

    return torch.optim.SGD(params,
                           lr=lr,
                           momentum=momentum,
                           weight_decay=weight_decay,
                           dampening=dampening,
                           nesterov=nesterov)
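
A minimal usage sketch, assuming logger and utils from the surrounding module are available; the model below is a placeholder:

import torch.nn as nn

model = nn.Linear(128, 2)  # placeholder model
optimizer = SGDOptimizer(model.parameters(), lr=0.01, momentum=0.9, nesterov=True)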
Example #10
def AdamOptimizer(params, **config):
    lr = config["lr"] if "lr" in config else 0.001
    betas = config["betas"] if "betas" in config else (0.9, 0.999)
    eps = config["eps"] if "eps" in config else 1e-08
    weight_decay = config["weight_decay"] if "weight_decay" in config else 0
    amsgrad = config["amsgrad"] if "amsgrad" in config else False

    name = "Adam Optimizer"
    logger.info(
        utils.generate_module_info(name, "lr", lr, "betas", betas, "eps", eps,
                                   "weight_decay", weight_decay, "amsgrad",
                                   amsgrad))

    return torch.optim.Adam(params,
                            lr=lr,
                            betas=betas,
                            eps=eps,
                            weight_decay=weight_decay,
                            amsgrad=amsgrad)
Example #11
    def __init__(self, config):
        super(PositionEncoder, self).__init__()
        assert "lambda_size" in config
        self.lambda_size = config["lambda_size"]
        self.min_timescale = config[
            "min_timescale"] if "min_timescale" in config else 1.0
        self.max_timescale = config[
            "max_timescale"] if "max_timescale" in config else 1.0e4
        self.fill_value = config["fill_value"] if "fill_value" in config else 0

        self._lambda = nn.Parameter(
            torch.full((self.lambda_size, 1), self.fill_value))

        self.name = config["name"] if "name" in config else "Position Encoder"
        logger.info(
            utils.generate_module_info(self.name, "lambda_size",
                                       self.lambda_size, "min_timescale",
                                       self.min_timescale, "max_timescale",
                                       self.max_timescale, "fill_value",
                                       self.fill_value))
Example #12
    def __init__(self, config):
        super(MultiHeadedAttentiveModule, self).__init__()
        assert "x_dim" in config and "y_dim" in config and "head_num" in config
        # Attention layer
        attention_config = deepcopy(config)
        attention_config["name"] = "Attention"
        self.attention = Attention(attention_config)

        self.input_dim = config["x_dim"]
        self.output_dim = config["y_dim"]
        self.head_num = config["head_num"]
        assert self.input_dim % self.head_num == 0
        self.sub_input_dim = self.input_dim // self.head_num

        self.input_linears = utils.clones(
            nn.Linear(self.input_dim, self.output_dim), 3)
        self.output_linear = nn.Linear(self.output_dim, self.output_dim)

        self.is_layer_norm = config[
            "is_layer_norm"] if "is_layer_norm" in config else True
        if self.is_layer_norm:
            # Attention layer norm
            self.attention_layer_norm = nn.LayerNorm([self.output_dim],
                                                     eps=1e-6)
            # FFN layer norm
            self.ffn_layer_norm = nn.LayerNorm([self.output_dim], eps=1e-6)

        self.ffn = FFN({
            "name": "FFN",
            "input_dim": self.output_dim,
            "out_dim_0": self.output_dim,
            "out_dim_1": self.output_dim
        })

        self.name = config[
            "name"] if "name" in config else "MultiHeadAttentiveModule"
        logger.info(
            utils.generate_module_info(self.name, "head_num", self.head_num,
                                       "input_dim", self.input_dim,
                                       "output_dim", self.output_dim,
                                       "is_layer_norm", self.is_layer_norm))
Example #13
    def __init__(self, config=None):
        config = deepcopy(config)
        if config is None or not isinstance(config, dict):
            config = {
                "function": "torch.optim.lr_scheduler.StepLR",
                "params": {
                    "step_size": 1000000,
                    "gamma": 1
                }
            }
        self.lr_scheduler = utils.name2function(config["function"])
        self.lr_scheduler_params = config["params"]
        if isclass(self.lr_scheduler) and issubclass(
                self.lr_scheduler,
            (torch.optim.lr_scheduler._LRScheduler,
             torch.optim.lr_scheduler.ReduceLROnPlateau)):
            self.name = config["function"].strip().rsplit(
                '.', maxsplit=1)[-1] + " Scheduler"
            logger.info(
                utils.generate_module_info(self.name,
                                           **self.lr_scheduler_params))
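
A hedged usage sketch: the constructor only resolves the scheduler class, so it still has to be instantiated against an existing optimizer later in the training setup (that binding step, and the optimizer variable, are assumptions here):

scheduler_config = {
    "function": "torch.optim.lr_scheduler.StepLR",
    "params": {"step_size": 10, "gamma": 0.5},
}
wrapper = LrScheduler(scheduler_config)
scheduler = wrapper.lr_scheduler(optimizer, **wrapper.lr_scheduler_params)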
Example #14
    def __init__(self, config):
        super(CrossEntropyLoss, self).__init__()
        if "weight" in config and isinstance(
                config["weight"], (list, tuple, np.ndarray, torch.Tensor)):
            self.weight = torch.Tensor(config["weight"])
        else:
            self.weight = None
        self.ignore_index = config[
            "ignore_index"] if "ignore_index" in config else -100
        self.reduction = config[
            "reduction"] if "reduction" in config else "elementwise_mean"

        self.loss_module = torch.nn.CrossEntropyLoss(
            weight=self.weight,
            ignore_index=self.ignore_index,
            reduction=self.reduction)
        self.name = config["name"] if "name" in config else "Cross Entropy Loss"
        logger.info(
            utils.generate_module_info(self.name, "weight", self.weight,
                                       "ignore_index", self.ignore_index,
                                       "reduction", self.reduction))
Example #15
    def __init__(self, config):
        super(FFN, self).__init__()
        assert "input_dim" in config and "out_dim_0" in config and "out_dim_1" in config
        self.input_dim = config["input_dim"]
        self.out_dim_0 = config["out_dim_0"]
        self.out_dim_1 = config["out_dim_1"]

        self.linear_1 = nn.Linear(in_features=self.input_dim,
                                  out_features=self.out_dim_0,
                                  bias=False)
        self.bias_1 = nn.Parameter(torch.zeros(1))

        self.relu = nn.ReLU(inplace=True)

        self.linear_2 = nn.Linear(in_features=self.out_dim_0,
                                  out_features=self.out_dim_1,
                                  bias=False)
        self.bias_2 = nn.Parameter(torch.zeros(1))

        self.name = config["name"] if "name" in config else "FFN"
        logger.info(
            utils.generate_module_info(self.name, "input_dim", self.input_dim,
                                       "out_dim_0", self.out_dim_0,
                                       "out_dim_1", self.out_dim_1))
Example #16
    def __init__(self, config):
        super(BCEWithLogitsLoss, self).__init__()
        if "weight" in config and isinstance(
                config["weight"], (list, tuple, np.ndarray, torch.Tensor)):
            self.weight = torch.Tensor(config["weight"])
        else:
            self.weight = None
        if "pos_weight" in config and isinstance(
                config["pos_weight"], (list, tuple, np.ndarray, torch.Tensor)):
            self.pos_weight = torch.Tensor(config["pos_weight"])
        else:
            self.pos_weight = None
        self.reduction = config[
            "reduction"] if "reduction" in config else "elementwise_mean"

        self.loss_module = nn.BCEWithLogitsLoss(weight=self.weight,
                                                reduction=self.reduction,
                                                pos_weight=self.pos_weight)
        self.name = config[
            "name"] if "name" in config else "Binary Cross Entropy"
        logger.info(
            utils.generate_module_info(self.name, "weight", self.weight,
                                       "pos_weight", self.pos_weight,
                                       "reduction", self.reduction))
Example #17
    def __init__(self, config):
        self.name = "Accuracy"
        logger.info(utils.generate_module_info(self.name))