Example #1
    def __init__(self, num_dimensions, flow_config,
                 dataset_class=None,
                 vocab=None, vocab_size=-1,
                 use_decoder=False, decoder_config=None,
                 default_embed_layer_dims=64,
                 category_prior=None,
                 **kwargs):
        super().__init__()
        self.use_decoder = use_decoder
        self.dataset_class = dataset_class
        self.D = num_dimensions

        # Embedding layer mapping categories to continuous vectors; the helper
        # also resolves the effective vocabulary size
        self.embed_layer, self.vocab_size = create_embed_layer(vocab, vocab_size, default_embed_layer_dims)
        self.num_categories = self.vocab_size

        self.prior_distribution = LogisticDistribution(mu=0.0, sigma=1.0)  # Prior distribution used in the encoding flows
        self.flow_layers = _create_flows(num_dims=num_dimensions,
                                         embed_dims=self.embed_layer.weight.shape[1],
                                         config=flow_config)
        # Create decoder if needed
        if self.use_decoder:
            self.decoder = create_decoder(num_categories=self.vocab_size,
                                          num_dims=self.D,
                                          config=decoder_config)

        # Prior over the categories. If not given, a uniform prior is assumed
        if category_prior is None:
            # A zero vector yields a uniform prior after the log_softmax below
            category_prior = torch.zeros(self.vocab_size, dtype=torch.float32)
        else:
            assert category_prior.shape[0] == self.num_categories, \
                "[!] ERROR: Category prior needs to be of size [%i] but is %s" % \
                (self.num_categories, str(category_prior.shape))
            if isinstance(category_prior, np.ndarray):
                category_prior = torch.from_numpy(category_prior)
        self.register_buffer("category_prior", F.log_softmax(category_prior, dim=-1))
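When `category_prior` is left as `None`, the registered buffer is `F.log_softmax` of a zero vector, i.e. a uniform log-prior over the vocabulary. A minimal standalone check of that property (the vocabulary size of 8 is an arbitrary illustrative choice):

import math
import torch
import torch.nn.functional as F

vocab_size = 8  # arbitrary illustrative size
log_prior = F.log_softmax(torch.zeros(vocab_size, dtype=torch.float32), dim=-1)

# Every entry equals log(1 / vocab_size), i.e. a uniform categorical prior
expected = torch.full((vocab_size,), -math.log(vocab_size))
assert torch.allclose(log_prior, expected)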
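The helpers `create_embed_layer`, `_create_flows`, `create_decoder`, and the `LogisticDistribution` class are defined elsewhere in the source repository and are not shown here. As a rough, hypothetical sketch (an assumption, not the project's actual code), `create_embed_layer` would build an `nn.Embedding` and resolve the effective vocabulary size, which would be consistent with the `self.embed_layer.weight.shape[1]` access above:

import torch.nn as nn

def create_embed_layer(vocab, vocab_size, default_embed_layer_dims):
    # Hypothetical sketch: prefer the size of an explicit vocab object,
    # otherwise fall back to the vocab_size argument
    if vocab is not None:
        vocab_size = len(vocab)
    assert vocab_size > 0, "Either vocab or a positive vocab_size must be given"
    embed_layer = nn.Embedding(vocab_size, default_embed_layer_dims)
    return embed_layer, vocab_size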