Example #1
    def forward(self, inputs):
        if self.activation:
            out = F.linear(inputs, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            out = F.linear(inputs, self.weight * self.scale, bias=self.bias * self.lr_mul)
        return out
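All of these snippets are forward() methods of EqualLinear-style layers (equalized-learning-rate fully connected layers, as found in common StyleGAN2 PyTorch ports). For context, a minimal sketch of a constructor defining the attributes these methods rely on (self.weight, self.scale, self.bias, self.lr_mul, self.activation) could look like the following; the signature and defaults are assumptions, since only forward() appears in the examples:

import math

import torch
from torch import nn


class EqualLinear(nn.Module):
    # NOTE: assumed constructor; the examples above and below show only forward().
    def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None):
        super().__init__()
        # Weights are stored pre-divided by lr_mul and rescaled in forward()
        # through self.scale, which also folds in the 1/sqrt(fan_in) factor.
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) if bias else None
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul
        self.activation = activation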
Example #2
    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            bias = None if self.bias is None else self.bias * self.lr_mul
            out = F.linear(input, self.weight * self.scale, bias=bias)

        return out
Example #3
    def forward(self, input, labels):
        out = F.linear(input, self.weight * self.scale_w, bias=None)

        if self.bias is not None:
            # Label-dependent bias: project the labels through the bias matrix
            # and add the result, scaled by the learning-rate multiplier.
            bias = F.linear(labels, self.bias * self.scale_b, bias=None)
            out = out + bias * self.lr_mul

        if self.activation:
            out = fused_leaky_relu(out, bias=None)

        return out
Example #4
    def forward(self, input):
        if self.activation == 'fused_lrelu':
            negative_slope = 0.2
            scale = 2 ** 0.5
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul, negative_slope, scale)
        elif self.activation == 'tanh':
            out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
            out = torch.tanh(out)
        else:
            out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)

        return out
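Example #4 spells out the default arguments of fused_leaky_relu (negative_slope=0.2, scale=sqrt(2)). Where the compiled fused op is unavailable, a rough non-fused equivalent in plain PyTorch is sketched below; the name fused_leaky_relu_ref is invented here, and the broadcasting assumes the bias indexes the channel dimension:

import torch.nn.functional as F


def fused_leaky_relu_ref(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
    # Non-fused reference: add the bias, apply leaky ReLU, then multiply by
    # sqrt(2) so the activation keeps roughly unit output variance.
    if bias is not None:
        # Broadcast the bias over every dimension after the channel axis.
        rest_dim = [1] * (input.ndim - bias.ndim - 1)
        input = input + bias.view(1, bias.shape[0], *rest_dim)
    return F.leaky_relu(input, negative_slope=negative_slope) * scale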
Example #5
    def forward(self, input):
        if self.activation:
            # print('weight', self.weight.size())
            # print('input', input.size())
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(input,
                           self.weight * self.scale,
                           bias=self.bias * self.lr_mul)

        return out
Example #6
    def forward(self, input):
        if self.activation == 'fused_lrelu':
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)
        else:
            out = F.linear(input,
                           self.weight * self.scale,
                           bias=self.bias * self.lr_mul)
            if self.activation == 'relu':
                out = F.relu(out)
            elif self.activation == 'lrelu':
                out = F.leaky_relu(out, negative_slope=0.2)
            elif self.activation == 'selu':
                out = F.selu(out)
            elif self.activation == 'tanh':
                out = torch.tanh(out)  # F.tanh is deprecated
        return out
Example #7
    def forward(self, input):
        """
        Return, the transformed x.
        Parameters
        ----------
        x: pytorch tensor, used for the input of linear.
        Returns
        -------
        the transformed x.
        """
        
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )

        return out
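Finally, a hypothetical usage sketch, assuming the EqualLinear constructor outlined after Example #1 (the feature sizes and lr_mul value are placeholders):

import torch

layer = EqualLinear(512, 512, lr_mul=0.01, activation='fused_lrelu')
x = torch.randn(4, 512)
out = layer(x)  # shape (4, 512): bias added, leaky ReLU applied, rescaled by sqrt(2)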