Example no. 1
0
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        """Precompute fixed sinusoidal positional encodings.

        Args:
            d_model: dimensionality of the encoding vectors.
            dropout: probability for the dropout layer stored on self.
            max_len: maximum sequence length to precompute.
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # One geometric frequency per even channel, shaped (1, d_model/2).
        freqs = flow.exp(
            flow.arange(0, d_model, 2).to(flow.float)
            * (-math.log(10000.0) / d_model)
        ).unsqueeze(0)
        positions = flow.arange(0, max_len, dtype=flow.float).unsqueeze(1)
        angles = positions * freqs

        table = flow.zeros((max_len, d_model))
        table[:, 0::2] = flow.sin(angles)
        table[:, 1::2] = flow.cos(angles)
        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch dim.
        table = table.unsqueeze(0).transpose(0, 1)
        # Kept as a frozen Parameter: saved with the module, never trained.
        self.pe = flow.nn.Parameter(table, requires_grad=False)
Example no. 2
0
 def __init__(self, d_model, max_len=5000):
     """Build the fixed sinusoidal position table once, in log space.

     Args:
         d_model: dimensionality of each position encoding.
         max_len: longest sequence the table covers.
     """
     super(PositionalEncoding, self).__init__()
     pos = flow.arange(0, max_len).unsqueeze(1).to(dtype=flow.float32)
     # Per-even-channel frequencies computed via exp(log) for stability.
     inv_freq = flow.exp(
         flow.arange(0, d_model, 2).to(dtype=flow.float32)
         * -(math.log(10000.0) / d_model)
     )
     table = flow.zeros(max_len, d_model, requires_grad=False)
     table[:, 0::2] = flow.sin(pos * inv_freq)
     table[:, 1::2] = flow.cos(pos * inv_freq)
     # Add a leading batch axis and register as a buffer (saved, not trained).
     self.register_buffer("pe", table.unsqueeze(0))
Example no. 3
0
 def _embedding_from_positions(self, position):
     """Compute absolute sinusoidal embeddings for explicit position indices.

     Args:
         position (flow.Tensor): position indices of shape (batch, time).

     Returns:
         flow.Tensor: embeddings of shape (batch, time, self.emb_dim),
         on the same device as ``position``.
     """
     batch, steps = position.size()
     # Geometrically decaying frequencies, one per even embedding channel.
     inv_freq = flow.exp(
         flow.arange(0, self.emb_dim, 2,
                     device=position.device, dtype=flow.float32)
         * -(math.log(10000.0) / self.emb_dim))
     angles = position.float().unsqueeze(-1) * inv_freq
     out = flow.zeros(batch, steps, self.emb_dim, device=position.device)
     out[:, :, 0::2] = flow.sin(angles)
     out[:, :, 1::2] = flow.cos(angles)
     return out
Example no. 4
0
 def ae_step(self, data, lambda_kl):
     """Run one VAE training step: reconstruction + weighted KL term.

     Args:
         data: input batch; ``cc`` presumably moves it to the right device
             — confirm against its definition.
         lambda_kl: weight applied to the KL loss.

     Returns:
         dict with scalar metrics: loss_rec, loss_kl, loss, grad_norm.
     """
     x = cc(data)
     mu, log_sigma, emb, dec = self.model(x)
     # L1 reconstruction error between decoder output and input.
     loss_rec = nn.L1Loss()(dec, x)
     # KL divergence of N(mu, sigma^2) from the unit Gaussian prior.
     loss_kl = 0.5 * flow.mean(
         flow.exp(log_sigma) + flow.mul(mu, mu) - 1 - log_sigma)
     loss = (self.config["lambda"]["lambda_rec"] * loss_rec
             + lambda_kl * loss_kl)
     # Standard optimizer cycle with gradient-norm clipping in between.
     self.opt.zero_grad()
     loss.backward()
     grad_norm = flow.nn.utils.clip_grad_norm_(
         self.model.parameters(),
         max_norm=self.config["optimizer"]["grad_norm"])
     self.opt.step()
     return {
         "loss_rec": loss_rec.item(),
         "loss_kl": loss_kl.item(),
         "loss": loss.item(),
         "grad_norm": grad_norm,
     }
Example no. 5
0
def _exp(self):
    """Return the elementwise exponential of this tensor."""
    result = flow.exp(self)
    return result
Example no. 6
0
 def test_exp(test_case):
     """Check flow.exp against numpy's exp on a random 4-D input."""
     x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
     actual = flow.exp(x)
     expected = np.exp(x.numpy())
     test_case.assertTrue(np.allclose(actual.numpy(), expected))
Example no. 7
0
 def forward(self, x):
     """Apply the elementwise exponential to the input tensor."""
     out = flow.exp(x)
     return out
Example no. 8
0
 def forward(self, x):
     """Encode the input, reparameterize, and decode.

     Returns:
         Tuple (mu, log_sigma, emb, dec): content posterior parameters,
         speaker embedding, and the decoder reconstruction.
     """
     emb = self.speaker_encoder(x)
     mu, log_sigma = self.content_encoder(x)
     # Reparameterization trick: eps ~ N(0, 1) with log_sigma's shape.
     noise = log_sigma.new_ones(tuple([*log_sigma.size()])).normal_(0, 1)
     z = mu + flow.exp(log_sigma / 2) * noise
     dec = self.decoder(z, emb)
     return mu, log_sigma, emb, dec
Example no. 9
0
 def forward(self, x):
     """Numerically stable softplus: log(1 + exp(beta * x)) / beta.

     Where beta * x exceeds self.threshold, exp would overflow, so the
     linear identity x is returned there instead.
     """
     scaled = x * self.beta
     soft = 1 / self.beta * flow.log(1.0 + flow.exp(scaled))
     return flow.where(scaled > self.threshold, x, soft)