Example #1
    def __init__(self, mlp_opt, cnn_opt, name="deconv"):
        """Constructor.

        Args:
          mlp_opt: Dictionary. Kwargs for vae_lib.MLP.
          cnn_opt: Dictionary. Kwargs for vae_lib.ConvNet2D for the CNN.
          name: Optional name.
    """
        super().__init__(name=name)
        assert cnn_opt["output_channels"][-1] is None, cnn_opt
        if "activation" in cnn_opt:
            cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
        self._cnn_opt = cnn_opt

        if mlp_opt and "activation" in mlp_opt:
            mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
        self._mlp_opt = mlp_opt
        self._target_out_shape = None
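
Every constructor in these examples funnels its activation arguments through get_act_func, whose implementation is not shown here. As a minimal sketch (the registry and names below are assumptions, not the library's actual code), it presumably accepts a callable, a string name, or None and always returns a callable:

    import tensorflow as tf

    _ACT_FUNCS = {
        "relu": tf.nn.relu,
        "elu": tf.nn.elu,
        "tanh": tf.tanh,
    }

    def get_act_func(act):
        # Callables (e.g. tf.nn.elu, tf.exp) pass through unchanged.
        if callable(act):
            return act
        # None means "no activation", i.e. the identity function.
        if act is None:
            return tf.identity
        # Strings are looked up in a small registry of common activations.
        return _ACT_FUNCS[act]
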
Example #2
    def __init__(self, cnn_opt, mlp_opt, mode="flatten", name="cnn"):
        """Constructor.

        Args:
          cnn_opt: Dictionary. Kwargs for the cnn. See vae_lib.ConvNet2D for
            details.
          mlp_opt: Dictionary. Kwargs for the mlp. See vae_lib.MLP for details.
          name: String. Optional name.
    """
        super().__init__(name=name)
        if "activation" in cnn_opt:
            cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
        self._cnn_opt = cnn_opt

        if "activation" in mlp_opt:
            mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
        self._mlp_opt = mlp_opt

        self._mode = mode
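
The mode="flatten" default hints at how the CNN output is handed to the MLP. The snippet below is only an assumption about that forward pass, not code from the module itself:

    import tensorflow as tf

    def connect_cnn_to_mlp(features, mode="flatten"):
        # features: [batch, H, W, C] feature map produced by the CNN.
        if mode == "flatten":
            # Collapse the spatial and channel dimensions into one vector per example.
            return tf.reshape(features, [tf.shape(features)[0], -1])
        raise ValueError("unsupported mode: {}".format(mode))
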
Example #3
  def __init__(
      self,
      dist=tfd.Normal,
      dist_kwargs=None,
      scale_act=tf.exp,
      scale="stddev",
      scale_val=1.0,
      loc_act=None,
      name="loc_scale_dist",
  ):
    super().__init__(name=name)
    self._scale_act = get_act_func(scale_act)
    self._loc_act = get_act_func(loc_act)
    # supports Normal, Logistic, Laplace, StudentT
    self._dist = get_distribution(dist)
    self._dist_kwargs = dist_kwargs or {}

    assert scale in ["stddev", "var", "prec", "fixed"], scale
    self._scale = scale
    self._scale_val = scale_val
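
The scale flag accepts "stddev", "var", "prec", or "fixed". How these map onto the distribution's scale parameter is not shown above; the helper below is an illustrative guess (parameterizing a standard deviation from the raw network output), not the module's actual code:

    import tensorflow as tf

    def scale_to_stddev(raw, scale, scale_act=tf.exp, scale_val=1.0):
        s = scale_act(raw)                        # constrain the raw output to be positive
        if scale == "stddev":
            return s                              # output parameterizes the stddev directly
        if scale == "var":
            return tf.sqrt(s)                     # output parameterizes the variance
        if scale == "prec":
            return tf.math.rsqrt(s)               # output parameterizes the precision
        if scale == "fixed":
            return tf.ones_like(raw) * scale_val  # constant scale; raw output ignored
        raise ValueError(scale)
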
Example #4
  def __init__(
      self,
      hidden_sizes=(64,),
      pred_gate_bias=0.0,
      corrector_gate_bias=0.0,
      activation=tf.nn.elu,
      name="predcorr_head",
  ):
    super().__init__(name=name)
    self._hidden_sizes = hidden_sizes
    self._activation = utils.get_act_func(activation)
    self._pred_gate_bias = pred_gate_bias
    self._corrector_gate_bias = corrector_gate_bias
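
The two gate biases presumably shift the initial operating point of sigmoid gates in the predictor and corrector updates. A generic gated update of that kind (an assumption, not the head's actual code) looks like:

    import tensorflow as tf

    def gated_update(state, candidate, gate_logits, gate_bias=0.0):
        # A positive bias pushes the initial gate towards 1 (favoring the candidate),
        # a negative bias towards 0 (favoring the previous state).
        gate = tf.sigmoid(gate_logits + gate_bias)
        return gate * candidate + (1.0 - gate) * state
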
Example #5
    def __init__(
        self,
        cnn_opt,
        mlp_opt=None,
        coord_type="linear",
        coord_freqs=3,
        name="broadcast_conv",
    ):
        """Args:
          cnn_opt: dict Kwargs for vae_lib.ConvNet2D for the CNN.
          mlp_opt: None or dict If dictionary, then kwargs for snt.nets.MLP. If
            None, then the model will not process the latent vector by an mlp.
          coord_type: ["linear", "cos", None] type of coordinate channels to
            add.
            None: add no coordinate channels.
            linear: two channels with values linearly spaced from -1. to 1. in
              the H and W dimension respectively.
            cos: coord_freqs^2 many channels containing cosine basis functions.
          coord_freqs: int number of frequencies used to construct the cosine
            basis functions (only for coord_type=="cos")
          name: Optional name.
    """
        super().__init__(name=name)

        assert cnn_opt["output_channels"][-1] is None, cnn_opt
        if "activation" in cnn_opt:
            cnn_opt["activation"] = get_act_func(cnn_opt["activation"])
        self._cnn_opt = cnn_opt

        if mlp_opt and "activation" in mlp_opt:
            mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
        self._mlp_opt = mlp_opt

        self._target_out_shape = None
        self._coord_type = coord_type
        self._coord_freqs = coord_freqs
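
The docstring fully specifies the two coordinate-channel variants, so they can be sketched directly. The construction below (NumPy, hypothetical helper name) follows that description rather than the library's own code:

    import numpy as np

    def make_coord_channels(height, width, coord_type="linear", coord_freqs=3):
        y = np.linspace(-1.0, 1.0, height)
        x = np.linspace(-1.0, 1.0, width)
        yy, xx = np.meshgrid(y, x, indexing="ij")
        if coord_type == "linear":
            # Two channels, linearly spaced from -1 to 1 along H and W.
            return np.stack([yy, xx], axis=-1)              # [H, W, 2]
        if coord_type == "cos":
            # coord_freqs**2 channels of separable cosine basis functions.
            channels = [np.cos(np.pi * fy * (yy + 1.0) / 2.0) *
                        np.cos(np.pi * fx * (xx + 1.0) / 2.0)
                        for fy in range(coord_freqs)
                        for fx in range(coord_freqs)]
            return np.stack(channels, axis=-1)              # [H, W, coord_freqs**2]
        return None                                         # coord_type=None: no channels
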
Example #6
  def __init__(
      self,
      num_components,
      component_dist,
      mask_activation=None,
      name="masked_mixture",
  ):
    """
        Spatial Mixture Model composed of a categorical masking distribution and
        a custom pixel-wise component distribution (usually logistic or
        gaussian).

        Args:
          num_components: int Number of mixture components >= 2
          component_dist: the distribution to use for the individual components
          mask_activation: str or function or None activation function that
            should be applied to the mask before the softmax.
          name: str
    """

    super().__init__(name=name)
    self._num_components = num_components
    self._dist = component_dist
    self._mask_activation = get_act_func(mask_activation)
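
The constructor only stores its arguments; combining the mask and the components presumably happens in the forward pass. One standard way to build such a pixel-wise mixture with TensorFlow Probability (an illustration, not the module's code) is:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    def build_pixel_mixture(mask_logits, locs, scales, mask_activation=tf.identity):
        # mask_logits: [..., H, W, K]; locs, scales: [..., H, W, K, C].
        logits = mask_activation(mask_logits)  # applied before the (implicit) softmax
        return tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=logits),
            components_distribution=tfd.Independent(
                tfd.Normal(loc=locs, scale=scales),
                reinterpreted_batch_ndims=1))
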
Example #7
  def __init__(self, name="mlp", **mlp_opt):
    super().__init__(name=name)
    if "activation" in mlp_opt:
      mlp_opt["activation"] = get_act_func(mlp_opt["activation"])
    self._mlp_opt = mlp_opt
    assert mlp_opt["output_sizes"][-1] is None, mlp_opt
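
The assert reflects a convention shared with the decoders above: the last entry of output_sizes (or output_channels) is None and is presumably filled in later from the target shape. A hypothetical kwargs dict for this wrapper would therefore look like:

    # Placeholder values; only the trailing None and the string activation are
    # implied by the code above.
    mlp_opt = {"output_sizes": [256, 256, None], "activation": "elu"}
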