Example #1
    def __init__(self, dims: Tuple[int, int, Tuple[int, ...], Tuple[int, ...]],
                 LadderEncode: Type[LadderEncoder] = LadderEncoder,
                 LadderDecode: Type[LadderDecoder] = LadderDecoder,
                 Classify: Type[Classifier] = Classifier,
                 Decode: Type[Decoder] = Decoder):
        """
        Ladder version of the Deep Generative Model.
        Uses a hierarchical latent representation that is
        trained end-to-end to yield disentangled
        representations.

        :param dims: dimensions of x, y, z layers and h layers
            note that len(z) == len(h).
        """
        x_dim, y_dim, z_dim, h_dim = dims
        super(LadderDeepGenerativeModel, self).__init__((x_dim, y_dim, z_dim[0], h_dim))

        neurons: List[int] = [x_dim, *h_dim]
        encoder_layers = [LadderEncode((neurons[i - 1], neurons[i], z_dim[i - 1])) for i in range(1, len(neurons))]

        e = encoder_layers[-1]
        encoder_layers[-1] = LadderEncode((e.in_features + y_dim, e.out_features, e.z_dim))

        decoder_layers = [LadderDecode((z_dim[i - 1], h_dim[i - 1], z_dim[i])) for i in range(1, len(h_dim))][::-1]

        self.classifier = Classify((x_dim, h_dim[0], y_dim))

        # noinspection PyTypeChecker
        self.encoder = nn.ModuleList(encoder_layers)
        # noinspection PyTypeChecker
        self.decoder = nn.ModuleList(decoder_layers)
        self.reconstruction = Decode((z_dim[0] + y_dim, h_dim, x_dim))
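
A minimal construction sketch for the ladder DGM above. The concrete sizes are illustrative, and the import is assumed (the package layout of this fork is not shown here):

# Assuming LadderDeepGenerativeModel is importable from this package.
# dims = (x_dim, y_dim, z_dims, h_dims) with len(z_dims) == len(h_dims):
# 784-dim inputs (e.g. flattened 28x28 images), 10 classes, three stochastic layers.
model = LadderDeepGenerativeModel(dims=(784, 10, (32, 16, 8), (256, 128, 64)))
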
    def __init__(self, dims: Tuple[int, Tuple[int, ...], int],
                 activation_fn: Callable[[Tensor], Tensor]=tr.relu,
                 output_activation: Opt[Callable[[Tensor], Tensor]]=tr.sigmoid):
        """
        Generative network

        Generates samples from the original distribution
        p(x) by transforming a latent representation, e.g.
        by finding p_θ(x|z).

        :param dims: dimensions of the networks
            given by the number of neurons of the form
            [latent_dim, [hidden_dims], input_dim].
        """
        super(Decoder, self).__init__()

        z_dim, h_dim, x_dim = dims

        neurons = [z_dim, *h_dim]
        linear_layers = [nn.Linear(neurons[i - 1], neurons[i]) for i in range(1, len(neurons))]
        # noinspection PyTypeChecker
        self.hidden = nn.ModuleList(linear_layers)
        self.reconstruction = nn.Linear(h_dim[-1], x_dim)
        self.output_activation = Act(output_activation) if (output_activation is not None) else None
        self.activation_fn = Act(activation_fn)
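
A construction sketch for the ``Decoder`` dims convention; sizes are illustrative and the import is assumed:

# Assuming Decoder is importable from this package.
# dims follow [latent_dim, [hidden_dims], input_dim]: a 32-dim z is decoded
# through hidden layers of 64, 128 and 256 units back to a 784-dim x.
decoder = Decoder(dims=(32, (64, 128, 256), 784))
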
    def __init__(self, dims: Tuple[int, Tuple[int, ...], int],
                 sample_layer: Type[BaseSample]=GaussianSample,
                 activation_fn: Callable[[Tensor], Tensor]=tr.relu):
        """
        Inference network

        Attempts to infer the probability distribution
        p(z|x) from the data by fitting a variational
        distribution q_φ(z|x). Returns the two parameters
        of the distribution (µ, log σ²).

        :param dims: dimensions of the networks
           given by the number of neurons of the form
           [input_dim, [hidden_dims], latent_dim].
        :param sample_layer: subclass of ``BaseSample`` used as the stochastic output layer
        """
        super(Encoder, self).__init__()

        x_dim, h_dim, z_dim = dims
        neurons = [x_dim, *h_dim]
        linear_layers = [nn.Linear(neurons[i - 1], neurons[i]) for i in range(1, len(neurons))]
        # noinspection PyTypeChecker
        self.hidden = nn.ModuleList(linear_layers)
        self.activation_fn = Act(activation_fn)
        self.sample = sample_layer(h_dim[-1], z_dim)
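
The mirror-image sketch for the ``Encoder``; sizes are illustrative and the import is assumed:

# Assuming Encoder (and the default GaussianSample layer) is importable from this package.
# dims follow [input_dim, [hidden_dims], latent_dim]; the sample layer
# parameterizes q(z|x) by (mu, log sigma^2) as described in the docstring above.
encoder = Encoder(dims=(784, (256, 128), 32))
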
    def __init__(self, dims: Seq[int], activation_fn: Callable[[Tensor], Tensor]=tr.relu,
                 output_activation: Opt[Callable[[Tensor], Tensor]]=None):
        super(Perceptron, self).__init__()
        self.dims = dims
        self.activation_fn = Act(activation_fn)
        self.output_activation = Act(output_activation) if (output_activation is not None) else None
        # noinspection PyTypeChecker
        self.layers = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(dims, dims[1:])])
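
The ``Perceptron`` builds one ``nn.Linear`` per pair of consecutive sizes in ``dims``; a small sketch with illustrative sizes and an assumed import:

# Assuming Perceptron is importable from this package.
# dims=[784, 256, 10] yields Linear(784, 256) and Linear(256, 10),
# with relu as the default activation_fn and no output activation.
mlp = Perceptron(dims=[784, 256, 10])
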
    def __init__(self,
                 dim: int,
                 hidden_dim: int = 8,
                 base_network: Type[FCNN] = FCNN):
        super().__init__()
        self.dim = dim
        # noinspection PyTypeChecker
        self.layers = nn.ModuleList()
        self.initial_param = nn.Parameter(tr.zeros(2))
        for i in range(1, dim):
            self.layers += [base_network(i, 2, hidden_dim)]
        self.reset_parameters()
    def __init__(self, dims: Tuple[int, Tuple[int, ...], Tuple[int, ...]],
                 LadderEncode: Type[LadderEncoder]=LadderEncoder,
                 LadderDecode: Type[LadderDecoder]=LadderDecoder,
                 Decode: Type[Decoder]=Decoder):
        """
        Ladder Variational Autoencoder as described by
        [Sønderby 2016]. Adds several stochastic
        layers to improve the log-likelihood estimate.

        :param dims: x, z and hidden dimensions of the networks
        """
        x_dim, z_dim, h_dim = dims
        super(LadderVariationalAutoencoder, self).__init__((x_dim, z_dim[0], h_dim))

        neurons: List[int] = [x_dim, *h_dim]
        encoder_layers = [LadderEncode((neurons[i - 1], neurons[i], z_dim[i - 1])) for i in range(1, len(neurons))]
        decoder_layers = [LadderDecode((z_dim[i - 1], h_dim[i - 1], z_dim[i])) for i in range(1, len(h_dim))][::-1]

        # noinspection PyTypeChecker
        self.encoder = nn.ModuleList(encoder_layers)
        # noinspection PyTypeChecker
        self.decoder = nn.ModuleList(decoder_layers)
        self.reconstruction = Decode((z_dim[0], h_dim, x_dim))
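
A construction sketch analogous to the ladder DGM above, without the label dimension; sizes are illustrative and the import is assumed:

# Assuming LadderVariationalAutoencoder is importable from this package.
# dims = (x_dim, z_dims, h_dims): three stochastic layers with matching hidden sizes.
lvae = LadderVariationalAutoencoder(dims=(784, (32, 16, 8), (256, 128, 64)))
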
    def __init__(self,
                 dim: int,
                 K: int = 5,
                 B: int = 3,
                 hidden_dim: int = 8,
                 base_network: Type[FCNN] = FCNN):
        super().__init__()
        self.dim = dim
        self.K = K
        self.B = B
        # noinspection PyTypeChecker
        self.layers = nn.ModuleList()
        self.init_param = nn.Parameter(tr.zeros(3 * K - 1))
        for i in range(1, dim):
            self.layers += [base_network(i, 3 * K - 1, hidden_dim)]
        self.reset_parameters()
    def __init__(self, dim: int = None, flows: List[Flow] = None):
        """
        Presents a sequence of normalizing flows as a ``torch.nn.Module``.

        If ``flows`` is None, the default is ``[PlanarNormalizingFlow(dim) for _ in range(16)]``.

        Forked from github.com/wohlert/semi-supervised-pytorch
        """
        super(NormalizingFlows, self).__init__()

        if (flows is None) and (dim is not None):
            flows_ = [PlanarNormalizingFlow(dim=dim) for _ in range(16)]
        elif flows:
            flows_ = flows
        else:
            raise ValueError(
                'Either dim or a non-empty flows list should be provided.')

        flows__ = nn.ModuleList(flows_)
        # noinspection PyTypeChecker
        self.flows = flows__
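
Both constructor paths in a short sketch, assuming ``NormalizingFlows`` and ``PlanarNormalizingFlow`` are importable from this package:

# Default: 16 planar flows, each acting on a 16-dimensional latent.
nf_default = NormalizingFlows(dim=16)
# Or pass an explicit list of flows instead of dim.
nf_custom = NormalizingFlows(flows=[PlanarNormalizingFlow(dim=16) for _ in range(4)])
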
Example #9
    def __init__(self, prior: Distribution, flows: List[Flow]):
        super().__init__()
        self.prior = prior
        flows_ = nn.ModuleList(flows)
        # noinspection PyTypeChecker
        self.flows = flows_
    def __init__(self,
                 z_dims: Tuple[int, ...],
                 prior_dists: Tuple[Normal, ...] = None,
                 PriorDist: Type[Normal] = None,
                 q_dist: Normal = Normal(),
                 mss: bool = True,
                 kl: BetaTC = BetaTC(mi__γ_tc__λ_dw=True),
                 dataset_size: int = 0):
        """
        1) WARNING: Carefully pick ``prior_dists``, ``q_dist`` and the ``BaseSample`` subclass used for the
        ``Encoder`` reparametrization trick (when used with ``semi_supervised_typed``).
        Not all combinations are valid. ``AuxiliaryDeepGenerativeModel`` requires particular caution
        as it uses the optional ``pz_params`` arg of the forward method.

        2) The ``forward`` and ``__call__`` methods return either a modified or an unmodified KLD
        (depending on ``self.γ``, ``self.λ`` and ``self.kl``).

        3) The KLD modification is not compatible with adding a ``qz_flow`` normalizing flow.
        Experimental support is added for ``kl=BetaTC(kld__γmin1_tc=True)``, but it affects only
        q_0(z), i.e. the density before the flows, hence the overall impact is unclear.

        :param z_dims: tuple of dims to switch between
        :param prior_dists: default is Normal
        :param PriorDist: default is Normal
        :param q_dist:
        :param mss: whether to use minibatch stratified sampling; the alternative is minibatch weighted sampling
        :param kl: KLD formula
        :param dataset_size: if not set here, it should later be set via the ``self.set_dataset_size`` method
        """
        super(BetaTCKLDLoss, self).__init__()
        Verbose.__init__(self=self)
        self.kl = kl
        self._maybe_unmod_kld = kl.mi__γ_tc__λ_dw or kl.kld__γmin1_tc
        self.γ, self.λ = 1, 1
        self.mss = mss
        self.z_dim = z_dims[0]

        self.q_dist = q_dist
        if isinstance(self.q_dist, (Normal, Laplace)):
            self.q_params_μ_first = True
        else:
            self.q_params_μ_first = False

        # distribution family of p(z)
        if (prior_dists is not None) and (PriorDist is None):
            if (len(z_dims) != len(prior_dists)) or (len(z_dims) != len(
                    set(z_dims))):
                raise ValueError
            self._prior_dict = {
                z_dim: prior_dist
                for z_dim, prior_dist in zip(z_dims, prior_dists)
            }
        elif prior_dists is None:
            PriorDist_ = PriorDist if (PriorDist is not None) else Normal
            self._prior_dict = {dim: PriorDist_() for dim in z_dims}
            for dim, dist in self._prior_dict.items():
                dist.set_prior_params(z_dim=dim)
        else:
            raise ValueError
        prior_dists_ = list(self._prior_dict.values())
        self._prior_dists = nn.ModuleList(prior_dists_)
        if len(set([type(dist)
                    for dist in prior_dists_] + [type(q_dist)])) != 1:
            raise NotImplementedError
        self.Dist = type(q_dist)

        self.dataset_size = dataset_size
        self.qz_x_flow = None
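
A minimal construction sketch under the defaults described above (Normal priors and q distribution, minibatch stratified sampling); sizes are illustrative and the import is assumed:

# Assuming BetaTCKLDLoss is importable from this package.
# One latent space of 16 dims; dataset_size can also be set later via set_dataset_size.
kld = BetaTCKLDLoss(z_dims=(16,), dataset_size=60000)
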