Example #1
    def forward(self, input):
        # If an activation is configured, apply the bias inside the fused leaky ReLU;
        # otherwise pass it directly to the linear function.
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(input,
                           self.weight * self.scale,
                           bias=self.bias * self.lr_mul)

        return out
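
For context, this forward pass assumes a module that defines `weight`, `bias`, `scale`, `lr_mul`, and `activation`, in the style of an equalized-learning-rate linear layer (as in StyleGAN2 implementations). Below is a minimal self-contained sketch of such a module; the class name is hypothetical, and `F.leaky_relu` plus a bias add stands in for the custom `fused_leaky_relu` op.

import math

import torch
import torch.nn.functional as F
from torch import nn


class EqualizedLinearSketch(nn.Module):
    """Hypothetical host module for the forward pass shown above."""

    def __init__(self, in_dim, out_dim, lr_mul=1.0, activation=False):
        super().__init__()
        # Store weights divided by lr_mul; rescale at runtime via self.scale.
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        self.bias = nn.Parameter(torch.zeros(out_dim))
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul
        self.activation = activation

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            # Stand-in for fused_leaky_relu: bias add, leaky ReLU, sqrt(2) gain.
            out = F.leaky_relu(out + self.bias * self.lr_mul, negative_slope=0.2)
            out = out * math.sqrt(2)
        else:
            out = F.linear(input, self.weight * self.scale,
                           bias=self.bias * self.lr_mul)
        return out

Rescaling by `scale` at call time, rather than baking it into the stored weights, is what equalizes the effective learning rate across layers with different fan-in.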
Example #2
    def _in_proj(self, input, start=0, end=None):
        # Slice the packed in-projection weight (and bias, if present)
        # to the requested row range, then apply the projection.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
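
A helper like this is typically used to split one packed in-projection into query/key/value slices, as in multi-head attention. The sketch below shows that calling pattern; the `embed_dim` attribute and the `in_proj_q/k/v` wrappers are assumptions for illustration, not taken from the snippet.

import torch
import torch.nn.functional as F
from torch import nn


class PackedInProjSketch(nn.Module):
    """Hypothetical module holding q, k and v projections in one weight/bias pair."""

    def __init__(self, embed_dim):
        super().__init__()
        self.embed_dim = embed_dim
        # Rows 0..E are the query projection, E..2E the key, 2E..3E the value.
        self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
        self.in_proj_bias = nn.Parameter(torch.zeros(3 * embed_dim))
        nn.init.xavier_uniform_(self.in_proj_weight)

    def _in_proj(self, input, start=0, end=None):
        weight = self.in_proj_weight[start:end, :]
        bias = self.in_proj_bias
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

Packing the three projections into one matrix also allows the full q/k/v projection to be computed with a single `F.linear` call when query, key, and value are the same tensor.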
Example #3
    def forward(self, input, sample=False, calculate_log_probs=False):
        # 1. Sample weights and bias from variational posterior
        if self.training or sample:
            weight = self.weight.sample()
            bias = self.bias.sample()
        else:
            weight = self.weight.mu
            bias = self.bias.mu

        # 2. Update log_prior and log_posterior according to current approximation
        if self.training or calculate_log_probs:
            self.log_prior = self.weight_prior.log_prob(
                weight) + self.bias_prior.log_prob(bias)
            self.log_variational_posterior = self.weight.log_prob(
                weight) + self.bias.log_prob(bias)
        else:
            self.log_prior, self.log_variational_posterior = 0, 0

        # 3. Do a forward pass through the layer
        return F.linear(input, weight, bias)
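
This forward pass assumes `self.weight` and `self.bias` are distribution-like objects exposing `.sample()`, `.mu`, and `.log_prob()`, and that `weight_prior`/`bias_prior` expose `.log_prob()`. A minimal sketch of such a diagonal-Gaussian variational posterior follows; the class name and the softplus parameterization via `rho` are assumptions.

import math

import torch
from torch import nn


class GaussianPosteriorSketch(nn.Module):
    """Hypothetical diagonal Gaussian with .mu, .sample() and .log_prob()."""

    def __init__(self, shape):
        super().__init__()
        self.mu = nn.Parameter(torch.zeros(shape))
        # rho parameterizes the standard deviation through a softplus, keeping it positive.
        self.rho = nn.Parameter(torch.full(shape, -5.0))

    @property
    def sigma(self):
        return torch.log1p(torch.exp(self.rho))

    def sample(self):
        # Reparameterization trick: mu + sigma * eps, with eps ~ N(0, 1).
        eps = torch.randn_like(self.mu)
        return self.mu + self.sigma * eps

    def log_prob(self, value):
        return (-0.5 * math.log(2 * math.pi)
                - torch.log(self.sigma)
                - 0.5 * ((value - self.mu) / self.sigma) ** 2).sum()

The priors can be plain fixed distributions (for example `torch.distributions.Normal(0., 1.)`), since only their `log_prob` is needed to accumulate the KL term during training.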
Example #4
    def forward(self, input):
        # Apply the fixed binary mask to the weights before the linear transform.
        return F.linear(input, self.mask * self.weight, self.bias)
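
The masked forward assumes a `self.mask` with the same shape as `self.weight`. A common setup registers the mask as a buffer so it is saved with the state dict and moved across devices but never updated by the optimizer; the lower-triangular mask below is only an illustration.

import torch
import torch.nn.functional as F
from torch import nn


class MaskedLinearSketch(nn.Module):
    """Hypothetical linear layer whose weight is multiplied element-wise by a fixed mask."""

    def __init__(self, in_features, out_features, mask=None):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features) * 0.01)
        self.bias = nn.Parameter(torch.zeros(out_features))
        if mask is None:
            # Illustrative choice: lower-triangular mask as used in autoregressive models.
            mask = torch.tril(torch.ones(out_features, in_features))
        self.register_buffer("mask", mask)

    def forward(self, input):
        return F.linear(input, self.mask * self.weight, self.bias)

Because the mask multiplies the weight inside the forward pass, masked-out entries receive zero gradient and simply stay at their initial values.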
Example #5
    def forward(self, input):
        if DEBUG:
            # Report whether the input contains NaNs, plus its mean, before the masked linear.
            print("masked linear: ", torch.any(torch.isnan(input)), input.mean())
        return F.linear(input, self.mask * self.weight, self.bias)
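
If the NaN check is only needed occasionally, a forward pre-hook keeps it out of the layer itself; a sketch of that alternative (the hook name is hypothetical):

import torch


def nan_check_pre_hook(module, inputs):
    # Inspect the first positional input just before the module runs.
    x = inputs[0]
    if torch.any(torch.isnan(x)):
        print(f"{module.__class__.__name__}: NaN in input, mean={x.mean()}")


# Usage: handle = layer.register_forward_pre_hook(nan_check_pre_hook)
# The hook can later be removed with handle.remove().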