Code Example #1
 def forward(self, *x):
     x = enforce_singleton(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     x = l2_normalize(x, axis=self.axis, keepdims=True)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     return x
Code Example #2
 def forward(self, *x):
     x = enforce_singleton(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     return x
Code Example #3
 def forward(self, x, **kwargs):
     x = enforce_singleton(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     x = x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     return x
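For reference, this forward pass divides each position by the root mean square of its channel values (dim=1):

    y = x / \sqrt{\mathrm{mean}_c(x_c^2) + 10^{-8}}

so the per-position feature vectors are kept at roughly unit scale.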
Code Example #4
    def forward(self, *x):
        x = enforce_singleton(x)
        pos = relu(x)
        reshape_shape = [1] * len(x.shape)
        reshape_shape[1] = self.num_parameters

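        # (x - abs(x)) * 0.5 equals min(x, 0), so only the negative part of the input
        # is scaled by the learnable per-channel weight.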
        neg = self.weight.view(*reshape_shape) * (x - abs(x)) * 0.5
        return pos + neg
Code Example #5
    def forward(self, *x):
        x = enforce_singleton(x)
        if self.training:
            # Recompute weights for each forward pass
            self._compute_weights()

        output = self.layer(x)
        return output
Code Example #6
 def forward(self, *x):
     x = enforce_singleton(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
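     # Refresh the u/v vectors this wrapper maintains before calling the wrapped module.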
     self._update_u_v()
     x = self.module(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     return x
Code Example #7
 def forward(self, *x):
     x = enforce_singleton(x)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias,
                      self.eps)
     if hasattr(self, 'in_sequence') and self.in_sequence:
         x = x.permute(0, 2, 1)
     return x
Code Example #8
    def forward(self, *x):
        """
        Args:
        x: Input tensor.

        Returns: output tensor

        """
        x = enforce_singleton(x)
        return relu(x)
Code Example #9
    def forward(self, *x):
        x = enforce_singleton(x)
        if hasattr(self, 'in_sequence') and self.in_sequence:
            x = x.permute(0, 2, 1)
        x = F.instance_norm(x, self.running_mean, self.running_var,
                            self.weight, self.bias, self.training
                            or not self.track_running_stats, self.momentum,
                            self.eps)

        if hasattr(self, 'in_sequence') and self.in_sequence:
            x = x.permute(0, 2, 1)
        return x
Code Example #10
    def forward(self, *x):
        x = enforce_singleton(x)
        if hasattr(self, 'in_sequence') and self.in_sequence:
            x = x.permute(0, 2, 1)

        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked = self.num_batches_tracked + 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(
                        self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        """ Decide whether the mini-batch stats should be used for normalization rather than the buffers.
                Mini-batch stats are used in training mode, and in eval mode when buffers are None.
                """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is
                                                           None)
        """Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
                passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
                used for normalization (i.e. in eval mode when buffers are not None).
                """
        x = F.batch_norm(
            x,
            # If buffers are not to be tracked, ensure that they won't be updated
            self.running_mean
            if not self.training or self.track_running_stats else None,
            self.running_var
            if not self.training or self.track_running_stats else None,
            self.weight,
            self.bias,
            bn_training,
            exponential_average_factor,
            self.eps)

        if hasattr(self, 'in_sequence') and self.in_sequence:
            x = x.permute(0, 2, 1)
        return x
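For reference, with the statistics selected above (mini-batch statistics when bn_training is True, the running buffers otherwise), F.batch_norm applies the standard affine normalization

    y = \gamma \, (x - \mathrm{E}[x]) / \sqrt{\mathrm{Var}[x] + \epsilon} + \beta

with \gamma = self.weight and \beta = self.bias.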
Code Example #11
    def forward(self, *x):
        x = enforce_singleton(x)
        # Prepare broadcasting shape.
        origin_shape = list(int_shape(x))
        group_shape = list(int_shape(x))
        last_dim = group_shape[self.axis]

        group_shape[self.axis] = last_dim // self.num_groups
        group_shape.insert(self.axis, self.num_groups)
        x = reshape(x, group_shape)
        x_mean, x_variance = moments(x, axis=self.axis, keepdims=True)
        x = (x - x_mean) / (sqrt(x_variance) + self.eps)
        x = reshape(x, origin_shape)
        if self.affine:
            x = x * self.weight + self.bias
        return x
Code Example #12
 def forward(self, *x):
     x = enforce_singleton(x)
     return hard_sigmoid(x)
Code Example #13
 def forward(self, *x):
     x = enforce_singleton(x)
     mean = x.mean(dim=self.axis, keepdim=True).detach()
     std = x.std(dim=self.axis, keepdim=True).detach()
     return self.weight * (x - mean) / (std + self._eps) + self.bias
Code Example #14
    def forward(self, *x):
        x = enforce_singleton(x)
        input_shape = x.shape
        ndims = len(x.shape)
        reduction_axes = [i for i in range(len(x.shape)) if i not in self.axis]
        broadcast_shape = [1] * ndims
        broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value

        def _broadcast(v):
            if v is not None and len(
                    v.shape) != ndims and reduction_axes != list(
                        range(ndims - 1)):
                return tf.reshape(v, broadcast_shape)
            return v

        scale, offset = _broadcast(self.weight), _broadcast(self.bias)

        mean, variance = tf.nn.moments(x, axes=reduction_axes, keepdims=True)
        running_mean = self.running_mean
        running_var = self.running_var

        if not self.training:
            mean, variance = self.running_mean, self.running_var

        new_mean, new_variance = mean, variance

        def _do_update(var, value):
            """Compute the updates for mean and variance."""
            return self.assign_moving_average(var, value, self.momentum,
                                              self.input_shape.prod())

        def mean_update():
            """Update the moving mean."""
            true_branch = lambda: _do_update(self.running_mean, new_mean)
            false_branch = lambda: self.running_mean
            if self.training:
                return true_branch()
            else:
                return false_branch()

        def variance_update():
            """Update the moving variance."""
            def true_branch_renorm():
                # We apply epsilon as part of the moving_stddev to mirror the training
                # code path.
                running_stddev = _do_update(sqrt(self.running_var),
                                            sqrt(new_variance + self.eps))
                self.running_var.assign(
                    tf.nn.relu(running_stddev * running_stddev - self.eps),
                    name='AssignNewValue')
                return self.running_var

            if self.renorm:
                true_branch = true_branch_renorm
            else:
                true_branch = lambda: _do_update(self.running_var, new_variance)

            false_branch = lambda: self.running_var
            if self.training:
                return true_branch()
            else:
                return false_branch()

        mean_update()
        variance_update()

        # Normalize with the statistics selected above: batch moments in training,
        # running statistics in eval (mean/variance were set accordingly above).
        return tf.nn.batch_normalization(x, mean, variance, offset, scale,
                                         self.eps)
Code Example #15
 def forward(self, *x):
     x = enforce_singleton(x)
     x = sin(self.w0 * x)
     return x
Code Example #16
 def forward(self, *x):
     x = enforce_singleton(x)
     return leaky_relu(x, self.alpha)
Code Example #17
 def forward(self, *x):
     x = enforce_singleton(x)
     return log_softmax(x)
Code Example #18
 def forward(self, *x):
     x = enforce_singleton(x)
     return gpt_gelu(x)
Code Example #19
 def forward(self, *x):
     x = enforce_singleton(x)
     return hard_mish(x)
Code Example #20
 def forward(self, *x):
     x = enforce_singleton(x)
     return softmax(x, axis=self.axis)
Code Example #21
 def forward(self, *x):
     x = enforce_singleton(x)
     return log_log(x)
Code Example #22
 def forward(self, *x):
     x = enforce_singleton(x)
     return soft_plus(x)
Code Example #23
 def forward(self, *x):
     x = enforce_singleton(x)
     return leaky_relu6(x)
Code Example #24
 def forward(self, *x):
     x = enforce_singleton(x)
     return tanh(x)
Code Example #25
 def forward(self, *x):
     x = enforce_singleton(x)
     return smooth_relu(x)