    def setup(self):
        base_config = self.config.base_config

        self.encoder = base_models.TransformerIOEncoder(config=base_config,
                                                        name='encoder')
        # Shifting is done separately in decoder.
        self.decoder = base_models.TransformerDecoder(
            config=base_config.replace(shift=False), name='decoder')
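The snippets on this page follow the flax.linen convention of declaring submodules inside setup() instead of inline with @nn.compact. A minimal, self-contained sketch of that pattern is below; TinyEncoderDecoder and its layer sizes are illustrative stand-ins, not taken from any example here.

import jax
import jax.numpy as jnp
from flax import linen as nn


class TinyEncoderDecoder(nn.Module):
  hidden_dim: int = 32

  def setup(self):
    # Submodules assigned here become named children of the module,
    # mirroring the self.encoder / self.decoder attributes above.
    self.encoder = nn.Dense(self.hidden_dim, name='encoder')
    self.decoder = nn.Dense(self.hidden_dim, name='decoder')

  def __call__(self, x):
    return self.decoder(nn.relu(self.encoder(x)))


model = TinyEncoderDecoder()
variables = model.init(jax.random.PRNGKey(0), jnp.ones((1, 8)))
out = model.apply(variables, jnp.ones((1, 8)))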
Example #2
    def setup(self):
        base_config = self.config.base_config

        self.encoder = base_models.TransformerIOEncoder(config=base_config,
                                                        name='encoder')
        # Shifting is done before call to decoder in order to compute masks.
        self.decoder = base_models.TransformerDecoder(
            config=base_config.replace(shift=False), name='decoder')
Example #3
  def setup(self):
    cfg = self.config

    self.encoder = base_models.TransformerIOEncoder(config=cfg, name='encoder')
    self.decoder = base_models.TransformerDecoder(config=cfg, name='decoder')
    if self.use_expanding_layer:
      self.expand = nn.Dense(
          self.num_partial_programs * cfg.emb_dim,
          kernel_init=cfg.kernel_init,
          bias_init=cfg.bias_init,
          name='expandembed')
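A hedged sketch of what an expanding layer like expandembed could do: project a single embedding into num_partial_programs embeddings. The shapes and the trailing reshape are assumptions for illustration, not taken from the original model.

import jax
import jax.numpy as jnp
from flax import linen as nn

num_partial_programs, emb_dim = 4, 16
expand = nn.Dense(num_partial_programs * emb_dim, name='expandembed')

x = jnp.ones((2, emb_dim))                       # (batch, emb_dim)
params = expand.init(jax.random.PRNGKey(0), x)
y = expand.apply(params, x)                      # (batch, num_partial_programs * emb_dim)
y = y.reshape(2, num_partial_programs, emb_dim)  # (batch, num_partial_programs, emb_dim)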
Example #4
  def setup(self):
    base_config = self.config.base_config

    if self.config.dataset_type == 'robust_fill':
      self.encoder = base_models.TransformerIOEncoder(config=base_config,
                                                      name='encoder')
    elif self.config.dataset_type in ['robust_fill_base', 'scan']:
      self.encoder = base_models.TransformerEncoder(config=base_config,
                                                    name='encoder')
    else:
      raise ValueError('Unhandled dataset_type: {}'.format(
          self.config.dataset_type))
    # Shifting is done separately in decoder.
    self.decoder = base_models.TransformerDecoder(
        config=base_config.replace(shift=False), name='decoder')
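The call base_config.replace(shift=False) relies on the config being an immutable dataclass-style object with a replace method, as provided by flax.struct.dataclass. A minimal sketch with a hypothetical ToyConfig:

from flax import struct


@struct.dataclass
class ToyConfig:
  shift: bool = True
  emb_dim: int = 128


base = ToyConfig()
decoder_cfg = base.replace(shift=False)  # returns a new config; base is unchanged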
Example #5
    def setup(self):
        cfg = self.config

        self.encoder = models.TransformerIOEncoder(config=cfg.base_cfg,
                                                   name='encoder')
        self.decoder = models.TransformerDecoder(config=cfg.base_cfg,
                                                 name='decoder')

        self.ae = Autoencoder(config=cfg.base_cfg, c=cfg.c, name='ae')
        self.vq = vqvae.VectorQuantizerEMA(
            config=cfg.base_cfg,
            num_embeddings=cfg.latent_vocab_size,
            commitment_cost=cfg.commitment_cost_vq,
            name='vq')
        self.latent_pos_emb = models.AddPositionEmbs(config=cfg.base_cfg,
                                                     cache=False,
                                                     name='posembed_latent')
Example #6
    def setup(self):
        cfg = self.config

        base_cfg = models.TransformerConfig(
            vocab_size=cfg.vocab_size,
            output_vocab_size=cfg.output_vocab_size,
            shift=cfg.shift,
            dtype=cfg.dtype,
            emb_dim=cfg.emb_dim,
            num_heads=cfg.num_heads,
            num_layers=cfg.num_layers,
            qkv_dim=cfg.qkv_dim,
            mlp_dim=cfg.mlp_dim,
            max_len=cfg.max_len,
            dropout_rate=cfg.dropout_rate,
            attention_dropout_rate=cfg.attention_dropout_rate,
            deterministic=cfg.deterministic,
            decode=cfg.decode,
            bos_token=cfg.bos_token,
            output_head=cfg.output_head,
            kernel_init=cfg.kernel_init,
            bias_init=cfg.bias_init,
            posemb_init=cfg.posemb_init)
        self.encoder = models.TransformerIOEncoder(config=base_cfg,
                                                   name='encoder')
        self.decoder = models.TransformerDecoder(config=base_cfg,
                                                 name='decoder')

        self.ae = Autoencoder(config=base_cfg, c=cfg.c, name='ae')
        self.vq = vqvae.VectorQuantizerEMA(
            config=base_cfg,
            num_embeddings=cfg.latent_vocab_size,
            commitment_cost=cfg.commitment_cost_vq,
            name='vq')
        self.latent_pos_emb = models.AddPositionEmbs(config=base_cfg,
                                                     cache=False,
                                                     name='posembed_latent')
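Example #6 forwards kernel_init, bias_init and posemb_init from cfg into the TransformerConfig. The snippet does not show the actual values, so treat the ones below as assumptions; they are initializers commonly used in Flax transformer examples.

from flax import linen as nn

kernel_init = nn.initializers.xavier_uniform()     # weight matrices
bias_init = nn.initializers.normal(stddev=1e-6)    # biases
posemb_init = nn.initializers.normal(stddev=0.02)  # learned position embeddings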