Code Example #1
File: relational_tucker3.py  Project: wayne9qiu/kge
    def __init__(
        self,
        config: Config,
        dataset: Dataset,
        configuration_key=None,
        init_for_load_only=False,
    ):
        self._init_configuration(config, configuration_key)

        # If round_dim_to is set, round the entity embedding dimension to one
        # of the allowed values and write the result back into the config.
        ent_emb_dim = self.get_option("entity_embedder.dim")
        ent_emb_conf_key = self.configuration_key + ".entity_embedder"
        round_ent_emb_dim_to = self.get_option("entity_embedder.round_dim_to")
        if len(round_ent_emb_dim_to) > 0:
            ent_emb_dim = round_to_points(round_ent_emb_dim_to, ent_emb_dim)
        config.set(ent_emb_conf_key + ".dim", ent_emb_dim, log=True)

        # Set the relation embedder dimensionality as required by the
        # RESCAL scorer.
        rescal_set_relation_embedder_dim(
            config, dataset, self.configuration_key + ".relation_embedder")

        # Initialize the base model with the RESCAL scorer.
        super().__init__(
            config=config,
            dataset=dataset,
            scorer=RescalScorer,
            configuration_key=self.configuration_key,
            init_for_load_only=init_for_load_only,
        )
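
All three examples rely on a round_to_points helper to snap an embedding dimension to a set of allowed values. A minimal sketch of such a helper, assuming it rounds the requested dimension up to the next allowed point (the actual LibKGE implementation may differ):

from typing import List

def round_to_points(round_points_to: List[int], to_round: int) -> int:
    # Return the smallest allowed point >= to_round; fall back to the
    # largest point if to_round exceeds all of them. (Sketch only; the
    # real LibKGE helper may choose differently.)
    assert len(round_points_to) > 0, "no rounding points given"
    for point in sorted(round_points_to):
        if point >= to_round:
            return point
    return max(round_points_to)

# Example under this sketch: round_to_points([64, 128, 256], 100) -> 128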
Code Example #2
File: lookup_embedder.py  Project: pbloem/kge
    def __init__(self, config: Config, dataset: Dataset,
                 configuration_key: str, vocab_size: int):
        super().__init__(config, dataset, configuration_key)

        # read config
        self.normalize_p = self.get_option("normalize.p")
        self.normalize_with_grad = self.get_option("normalize.with_grad")
        self.regularize = self.check_option("regularize", ["", "lp"])
        self.sparse = self.get_option("sparse")
        self.config.check("train.trace_level", ["batch", "epoch"])
        self.vocab_size = vocab_size

        # Optionally round the embedding dimension to an allowed value.
        round_embedder_dim_to = self.get_option("round_dim_to")
        if len(round_embedder_dim_to) > 0:
            self.dim = round_to_points(round_embedder_dim_to, self.dim)

        # setup embedder
        self.embeddings = torch.nn.Embedding(self.vocab_size,
                                             self.dim,
                                             sparse=self.sparse)

        # initialize weights
        init_ = self.get_option("initialize")
        try:
            init_args = self.get_option("initialize_args." + init_)
        except KeyError:
            init_args = self.get_option("initialize_args")

        # Automatically set arg a (lower bound) for uniform_ if not given:
        # default to a symmetric interval [-b, b].
        if init_ == "uniform_" and "a" not in init_args:
            init_args["a"] = -init_args["b"]
            self.set_option("initialize_args.a", init_args["a"], log=True)

        self.initialize(self.embeddings.weight.data, init_, init_args)

        # TODO: handle negative dropout cleanly; it can currently show up when
        # running Ax hyperparameter searches. With train.auto_correct, it is
        # corrected to 0 so that torch.nn.Dropout accepts the value.
        dropout = self.get_option("dropout")
        if dropout < 0:
            if config.get("train.auto_correct"):
                config.log("Setting {}.dropout to 0, "
                           "was set to {}.".format(configuration_key, dropout))
                dropout = 0
        self.dropout = torch.nn.Dropout(dropout)
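
The initialize call above applies an initializer chosen by name in the config. A minimal sketch of such a dispatch, assuming init_ names an in-place initializer in torch.nn.init (e.g. "uniform_" or "normal_") and init_args holds its keyword arguments; the real KgeEmbedder.initialize may do more validation:

import torch

def initialize(weights: torch.Tensor, init_: str, init_args: dict) -> None:
    # Look up the initializer by name in torch.nn.init and apply it in
    # place, e.g. init_="uniform_" with init_args={"a": -0.1, "b": 0.1}.
    getattr(torch.nn.init, init_)(weights, **init_args)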
Code Example #3
File: lookup_embedder.py  Project: vbharadwaj-bk/kge
    def __init__(
        self,
        config: Config,
        dataset: Dataset,
        configuration_key: str,
        vocab_size: int,
        init_for_load_only=False,
    ):
        super().__init__(config,
                         dataset,
                         configuration_key,
                         init_for_load_only=init_for_load_only)

        # read config
        self.normalize_p = self.get_option("normalize.p")
        self.regularize = self.check_option("regularize", ["", "lp"])
        self.sparse = self.get_option("sparse")
        self.config.check("train.trace_level", ["batch", "epoch"])
        self.vocab_size = vocab_size

        # Optionally round the embedding dimension to an allowed value.
        round_embedder_dim_to = self.get_option("round_dim_to")
        if len(round_embedder_dim_to) > 0:
            self.dim = round_to_points(round_embedder_dim_to, self.dim)

        self._embeddings = torch.nn.Embedding(
            self.vocab_size,
            self.dim,
            sparse=self.sparse,
        )

        if not init_for_load_only:
            # initialize and normalize weights (skipped when the parameters
            # will be overwritten by a checkpoint load anyway)
            self._init_embeddings(self._embeddings.weight.data)
            self._normalize_embeddings()

        # TODO: handle negative dropout cleanly; it can currently show up when
        # running Ax hyperparameter searches. With train.auto_correct, it is
        # corrected to 0 so that torch.nn.Dropout accepts the value.
        dropout = self.get_option("dropout")
        if dropout < 0:
            if config.get("train.auto_correct"):
                config.log("Setting {}.dropout to 0, "
                           "was set to {}.".format(configuration_key, dropout))
                dropout = 0
        self.dropout = torch.nn.Dropout(dropout)
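
Example #3 factors weight setup into _init_embeddings and _normalize_embeddings. A minimal sketch of what a _normalize_embeddings method could look like, assuming it renormalizes every embedding row to unit L_p norm in place when normalize_p is positive (a hypothetical reconstruction; the real method may differ):

import torch
import torch.nn.functional as F

def _normalize_embeddings(self):
    # Renormalize each embedding vector to unit L_p norm in place when
    # normalize.p is positive. (Sketch; field names follow the snippet
    # above, which reads normalize_p from the "normalize.p" option.)
    if self.normalize_p > 0:
        with torch.no_grad():
            self._embeddings.weight.data = F.normalize(
                self._embeddings.weight.data, p=self.normalize_p, dim=-1
            )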