def test_check_output(self):
    fluid.enable_imperative()
    linear = paddle.nn.Conv2d(2, 3, 3)
    before_weight = linear.weight.numpy()
    if self.dim is None:
        self.dim = -1

    if self.dim != -1:
        # Normalize a negative dim against the number of weight axes;
        # len(before_weight) is only the size of axis 0, not the rank.
        self.dim = (self.dim + len(before_weight.shape)) % len(
            before_weight.shape)
    wn = weight_norm(linear, dim=self.dim)
    outputs = []
    for name, data in self.data.items():
        output = linear(fluid.dygraph.to_variable(data))
        outputs.append(output.numpy())
    after_weight = linear.weight
    self.actual_outputs = [linear.weight_g.numpy(), linear.weight_v.numpy()]

    expect_output = self.weight_normalize(before_weight, self.dim)

    for expect, actual in zip(expect_output, self.actual_outputs):
        self.assertTrue(
            numpy.allclose(numpy.array(actual), expect, atol=0.001))
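# The expected values above come from self.weight_normalize, which is not
# shown in this excerpt. The sketch below is a hypothetical numpy reference
# for that helper, assuming the usual weight-norm decomposition: weight_v
# keeps the original weight and weight_g holds its norm, taken over all axes
# except `dim` (or over the whole flattened tensor when dim == -1).
import numpy


def weight_normalize(w, dim=-1):
    # Hypothetical reference; returns (g, v) such that w == g * v / ||v||.
    if dim == -1:
        # Single scalar g: norm over the entire flattened tensor.
        return numpy.array([numpy.linalg.norm(w)]), w
    # One g entry per slice along `dim`: move that axis to the front,
    # flatten the remaining axes, and take row-wise norms.
    rows = numpy.moveaxis(w, dim, 0).reshape(w.shape[dim], -1)
    return numpy.linalg.norm(rows, axis=1), w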
def __init__(self,
             n_inputs,
             n_outputs,
             kernel_size,
             stride,
             dilation,
             padding,
             dropout=0.2):
    super(TemporalBlock, self).__init__()
    self.conv1 = weight_norm(
        nn.Conv1D(
            n_inputs,
            n_outputs,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation))
    # Chomp1d is used to make sure the network is causal.
    # We pad by (k-1)*d on the two sides of the input for convolution,
    # and then use Chomp1d to remove the (k-1)*d output elements on the right.
    self.chomp1 = Chomp1d(padding)
    self.relu1 = nn.ReLU()
    self.dropout1 = nn.Dropout(dropout)

    self.conv2 = weight_norm(
        nn.Conv1D(
            n_outputs,
            n_outputs,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation))
    self.chomp2 = Chomp1d(padding)
    self.relu2 = nn.ReLU()
    self.dropout2 = nn.Dropout(dropout)

    self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1,
                             self.dropout1, self.conv2, self.chomp2,
                             self.relu2, self.dropout2)
    self.downsample = nn.Conv1D(n_inputs, n_outputs,
                                1) if n_inputs != n_outputs else None
    self.relu = nn.ReLU()
    self.init_weights()
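# TemporalBlock relies on a Chomp1d layer that is defined elsewhere. The
# class name comes from the code above; the body below is a minimal sketch
# assuming the standard TCN formulation, where the (k-1)*d padded steps on
# the right are sliced off so that output t depends only on inputs <= t.
import paddle.nn as nn


class Chomp1d(nn.Layer):
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # x is [batch, channels, time]; trim the trailing chomp_size
        # steps that the symmetric padding appended on the right.
        return x[:, :, :x.shape[2] - self.chomp_size]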
def test_check_output(self):
    fluid.enable_imperative()
    linear = paddle.nn.Conv2d(2, 3, 3)
    before_weight = linear.weight
    wn = weight_norm(linear, dim=self.dim)
    rwn = remove_weight_norm(linear)
    after_weight = linear.weight
    self.assertTrue(
        numpy.allclose(
            before_weight.numpy(), after_weight.numpy(), atol=0.001))
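# A quick standalone usage sketch of the pair of hooks exercised above,
# assuming they are importable from paddle.nn.utils (treat the import path
# as an assumption): weight_norm splits weight into weight_g and weight_v,
# and remove_weight_norm folds g * v / ||v|| back into a single weight,
# which is why the values before and after should match.
import numpy
import paddle
import paddle.fluid as fluid
from paddle.nn.utils import weight_norm, remove_weight_norm

fluid.enable_imperative()
layer = paddle.nn.Conv2d(2, 3, 3)
w0 = layer.weight.numpy()
weight_norm(layer, dim=None)  # weight is replaced by weight_g, weight_v
assert hasattr(layer, 'weight_g') and hasattr(layer, 'weight_v')
remove_weight_norm(layer)  # the decomposition is folded back into weight
assert numpy.allclose(w0, layer.weight.numpy(), atol=0.001)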