Example #1
 def forward(self, x):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     weight = torch.tensor([0.25], device=device)
     x = self.flatten(x)
     x = F.prelu(self.ln1(x), weight)
     x = F.prelu(self.ln2(x), weight)
     x = F.prelu(self.ln3(x), weight)
     return x
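Note: throughout these examples, F.prelu's weight argument must contain either a single shared slope or one slope per channel (the size of dim 1 of the input). A minimal sketch of both forms (shapes chosen purely for illustration):

import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 8, 8)           # (N, C, H, W)
shared = torch.tensor([0.25])         # one slope shared by every channel
per_channel = torch.full((3,), 0.25)  # one slope per channel (C = 3)

y1 = F.prelu(x, shared)
y2 = F.prelu(x, per_channel)
assert torch.allclose(y1, y2)         # identical here since all slopes equal 0.25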
Example #2
 def forward_(self, pos_x, neg_x, edge_index):
     # type: (Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
     """Returns the latent space for the input arguments, their
     corruptions and their summary representation."""
     pos_z = F.prelu(self.gcn(pos_x, edge_index), self.reluWeight)
     neg_z = F.prelu(self.gcn(neg_x, edge_index), self.reluWeight)
     summary = torch.sigmoid(torch.mean(pos_z, dim=0))
     return pos_z, neg_z, summary
Example #3
 def forward_(self, pos_x, neg_x, first_edge_index, second_edge_index):
     # type: (Tensor, Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor, Tensor]
     pos_x = F.prelu(self.gcn1(pos_x, second_edge_index), self.prelu1)
     pos_z = F.prelu(self.gcn2(pos_x, first_edge_index), self.prelu2)
     neg_x = F.prelu(self.gcn1(neg_x, second_edge_index), self.prelu1)
     neg_z = F.prelu(self.gcn2(neg_x, first_edge_index), self.prelu2)
     summary = torch.sigmoid(torch.mean(pos_z, dim=0))
     return pos_z, neg_z, summary
Example #4
 def inverse(self, x):
     inv = F.prelu(x, 1 / torch.exp(self.alphas))
     logdet = (inv >= 0).float() * torch.ones(size=(
         self.hparams['channels'], )) + (inv < 0).float() * self.alphas
     logdet = torch.sum(logdet, dim=1)
     #print("Prelu logdet "+str(logdet))
     return (inv, logdet)
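Example #4 inverts a flow layer whose forward pass applies F.prelu with slope exp(alphas) (compare Example #23 below); applying F.prelu again with the reciprocal slope undoes it exactly, since the slope is always positive and signs are preserved. A minimal round-trip check, assuming a scalar alphas:

import torch
import torch.nn.functional as F

alphas = torch.randn(1)
x = torch.randn(2, 5)

y = F.prelu(x, torch.exp(alphas))          # forward: slope exp(alphas) on negatives
x_rec = F.prelu(y, 1 / torch.exp(alphas))  # inverse: reciprocal slope restores x
assert torch.allclose(x, x_rec, atol=1e-6)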
Example #5
    def forward(self, x):
        mp_ks = 2
        mp_strd = 2

        x = self.prelu1_1(self.conv1(x))
        x = self.prelu1_2(self.conv2(x))
        x = F.max_pool2d(x, kernel_size=mp_ks, stride=mp_strd)

        x = self.prelu2_1(self.conv3(x))
        x = self.prelu2_2(self.conv4(x))
        x = F.max_pool2d(x, kernel_size=mp_ks, stride=mp_strd)

        x = self.prelu3_1(self.conv5(x))
        x = self.prelu3_2(self.conv6(x))
        x = F.max_pool2d(x, kernel_size=mp_ks, stride=mp_strd)

        x = x.view(-1, 3 * 3 * 512)  # Flatten

        features3d = self.fc1(x)
        # features2d = self.fc2(features3d)
        x = F.prelu(features3d, self.prelu_weight)

        x = self.fc3(x)

        return features3d, x
Example #6
    def forward(self, x):
        if self.batchnorm1 == 'On':
            x = self.fc0_bn(x)
        x = self.fc1(x)
        x = F.prelu(x, torch.tensor(0.1))
        if self.batchnorm1 == 'On':
            x = self.fc1_bn(x)
        x = self.fc1_do(x)
        x = self.fc2(x)
        x = F.prelu(x, torch.tensor(0.1))
        if self.batchnorm2 == 'On':
            x = self.fc2_bn(x)
        x = self.fc2_do(x)
        x = self.fc3(x)

        return x
Example #7
def prelu(input, *args, **kwargs):
    output = F.prelu(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )
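This wrapper applies the dense functional to the sparse tensor's feature matrix and rebuilds a sparse tensor around the result. A self-contained sketch of the same rewrap pattern, using a minimal stand-in class rather than MinkowskiEngine's actual SparseTensor:

import torch
import torch.nn.functional as F
from dataclasses import dataclass

@dataclass
class FakeSparse:  # stand-in: only the fields the wrapper touches
    F: torch.Tensor                     # per-point feature matrix
    coordinate_map_key: object = None
    coordinate_manager: object = None

def sparse_prelu(input, *args, **kwargs):
    out = F.prelu(input.F, *args, **kwargs)
    return FakeSparse(out, input.coordinate_map_key, input.coordinate_manager)

st = FakeSparse(torch.randn(5, 3))
out = sparse_prelu(st, torch.tensor(0.25))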
Example #8
    def forward(self, x):
        p = self.convP1(x)
        #print('conv1 shape: ' + str(x.shape))
        p = self.convP1_drop(p)
        #print('drop  shape: ' + str(x.shape))
        #Size changes from (20, 64, 64) to (30, 32, 32)
        p = F.max_pool2d(p, kernel_size=2)
        #print('pool1 shape: ' + str(x.shape))
        p = F.prelu(p, torch.tensor(.25))
        #Size changes from (20, 32, 32) to (30, 32, 32)
        p = self.convP2(p)
        #print('conv2 shape: ' + str(x.shape))
        p = self.convP2_drop(p)
        #print('drop  shape: ' + str(x.shape))
        #Size changes from (30, 32, 32) to (30, 16, 16)
        p = F.max_pool2d(p, kernel_size=2)
        #print('pool2 shape: ' + str(x.shape))
        p = F.prelu(p, torch.tensor(.25))
        p = p.view((-1, 1024))
        #print('post view shape: ' + str(x.shape))
        p = self.fcP1(p)


        v = self.convV1(x)
        #print('conv1 shape: ' + str(x.shape))
        v = self.convV1_drop(v)
        #print('drop  shape: ' + str(x.shape))
        #Size changes from (20, 64, 64) to (30, 32, 32)
        v = F.max_pool2d(v, kernel_size=2)
        #print('pool1 shape: ' + str(x.shape))
        v = F.prelu(v, torch.tensor(.25))
        #Size changes from (20, 32, 32) to (30, 32, 32)
        v = self.convV2(v)
        #print('conv2 shape: ' + str(x.shape))
        v = self.convV2_drop(v)
        #print('drop  shape: ' + str(x.shape))
        #Size changes from (30, 32, 32) to (30, 16, 16)
        v = F.max_pool2d(v, kernel_size=2)
        #print('pool2 shape: ' + str(x.shape))
        v = F.prelu(v, torch.tensor(.25))
        v = v.view((-1, 1024))
        #print('post view shape: ' + str(x.shape))
        v = self.fcV1(v)


        return p, v
Example #9
 def forward(self, input):
     input = F.prelu(input, self.weight)
     input = clamp(input, -self.clip_val, self.clip_val, self.inplace)
     input = (input + 1) / 2
     input = LinearQuantizeSTE.apply(input, self.scale_factor,
                                     self.dequantize, self.inplace)
     input = input * 2 - 1
     return input
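Example #9 depends on LinearQuantizeSTE, which is not shown. A minimal sketch of the same straight-through-estimator idea (class and argument names hypothetical, not the original implementation):

import torch

class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        # Quantize to a uniform grid in the forward pass.
        return torch.round(x * scale) / scale

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: gradients flow as if forward were the identity.
        return grad_output, None

x = torch.randn(4, requires_grad=True)
y = RoundSTE.apply(x, 255.0)
y.sum().backward()  # x.grad is all ones despite the non-differentiable round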
Example #10
 def forward(self, x, y, z, w, w0, w1, w2, w3):
     x = F.prelu(x, w0)
     x = F.prelu(x, self.w4)
     y = F.prelu(y, w1)
     y = F.prelu(y, self.w5)
     z = F.prelu(z, w2)
     z = F.prelu(z, self.w6)
     w = F.prelu(w, w3)
     w = F.prelu(w, self.w7)
     return x, y, z, w
Example #11
    def forward(self, batch):

        if self.tied:
            for i in range(self.num_layers):
                #             if self.tied:
                #                 W1 = self.W1[:, :, 0]
                #                 W2 = self.W2[:, :, 0]
                #             else:
                #                 W1 = self.W1[: ,:, i]
                #                 W2 = self.W2[:, :, i]

                batch = F.linear(batch,
                                 weight=self.W1[:, :, 0],
                                 bias=self.b[:, i])
                if self.layer_normalization:
                    batch = self.normed(batch)
                batch = F.prelu(batch, self.slope)
                # batch = torch.sigmoid(batch)
                if self.symmetric:
                    batch = F.linear(batch,
                                     weight=self.W1[:, :, 0].t(),
                                     bias=self.c[:, i])
                else:
                    batch = F.linear(batch,
                                     weight=self.W2[:, :, 0],
                                     bias=self.c[:, i])
        else:
            for i in range(self.num_layers):
                batch = F.linear(batch,
                                 weight=self.W1[:, :, i],
                                 bias=self.b[:, i])
                if self.layer_normalization:
                    batch = self.normed(batch)
                batch = F.prelu(batch, self.slope)
                # batch = torch.sigmoid(batch)
                if self.symmetric:
                    batch = F.linear(batch,
                                     weight=self.W1[:, :, i].t(),
                                     bias=self.c[:, i])
                else:
                    batch = F.linear(batch,
                                     weight=self.W2[:, :, i],
                                     bias=self.c[:, i])

        return batch
Example #12
def standard_unit(input_tensor, inchannel, outchannel, kernel_size=3,
                  padding=1, dropout_rate=0.5):
    # dropout_rate was undefined in the original; it is added here as a
    # keyword argument with an assumed default of 0.5.
    w1 = torch.empty(outchannel, inchannel, kernel_size, kernel_size)
    b1 = torch.empty(outchannel)
    torch.nn.init.kaiming_normal_(w1)
    torch.nn.init.zeros_(b1)  # kaiming_normal_ needs >= 2 dims; zero-init the 1-D bias
    x = F.conv2d(input_tensor, w1, b1, padding=padding)
    x = F.batch_norm(x, None, None, training=True)  # running stats are required args; None is valid in training mode
    x = F.prelu(x, torch.tensor(0.25))  # F.prelu needs a weight; a scalar slope of 0.25 is assumed
    x = F.dropout2d(x, dropout_rate)
    w2 = torch.empty(outchannel, outchannel, kernel_size, kernel_size)
    b2 = torch.empty(outchannel)
    torch.nn.init.kaiming_normal_(w2)
    torch.nn.init.zeros_(b2)
    x = F.conv2d(x, w2, b2, padding=padding)  # the original reused w1/b1 here by mistake
    x = F.batch_norm(x, None, None, training=True)
    x = F.prelu(x, torch.tensor(0.25))
    x = F.dropout2d(x, dropout_rate)

    return x
Example #13
 def adpW(self, x):
     # x = F.normalize(x)
     x = self.adp_metric_embedding1(x)
     # x = self.adp_metric_embedding1_bn(x)
     x = F.prelu(x, torch.tensor(0.25))  # F.prelu requires a weight argument; a scalar slope of 0.25 is assumed here
     x = self.adp_metric_embedding2(x)
     # x = self.adp_metric_embedding2_bn(x)
     diag_matrix = []
     for i in range(x.size(0)):
         diag_matrix.append(torch.diag(x[i,:]))
     x = torch.stack(diag_matrix)
     W = torch.matmul(self.transform_matrix,torch.matmul(x,self.transform_matrix))
     return W
Example #14
 def embedding_(self, x, first_edge_index, second_edge_index):
     r"""Generates learned representation of the input nodes."""
     x = F.prelu(self.gcn1(x, second_edge_index), self.prelu1)
     x = F.prelu(self.gcn2(x, first_edge_index), self.prelu2)
     return x
Example #15
File: prelu_test.py  Project: zjlgame/glow
 def forward(self, inputs, weights):
     return F.prelu(inputs + inputs, weights)
Example #16
 def forward(self, x):
     from torch.nn import functional as F
     weights = torch.rand(self.num_params)  # torch.rand already returns a float tensor; the numpy round-trip was redundant
     return F.prelu(x, weight=weights)
Example #17
 def _prelu(self, tensor):
     return F.prelu(tensor, self.weight)
Example #18
 'relu': F.relu,
 'sigmoid': torch.sigmoid,
 'tanh': torch.tanh,
 'softabs': lambda x, epsilon: torch.sqrt(torch.pow(x, 2.0) + epsilon),
 'softmax': nnef_softmax,
 'softplus': lambda x: torch.log(torch.exp(x) + 1.0),
 'elu': F.elu,
 'prelu': lambda x, alpha: F.prelu(x, alpha),
 'leaky_relu': lambda x, alpha: F.leaky_relu(x, alpha),
 'max_pool_with_index': nnef_max_pool_with_index,
 'max_pool': nnef_max_pool,
 'avg_pool': nnef_avg_pool,
 'rms_pool': nnef_rms_pool,
 'linear': nnef_linear,
 'separable_conv': nnef_separable_conv,
 'separable_deconv':
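Example #18 is a fragment of a name-to-callable table from an NNEF-to-PyTorch converter; operations are looked up by string and invoked uniformly. A minimal sketch of that dispatch pattern with just two entries (the table name OPS is illustrative):

import torch
import torch.nn.functional as F

OPS = {
    'relu': F.relu,
    'prelu': lambda x, alpha: F.prelu(x, alpha),
}

x = torch.randn(4, 3)
y = OPS['prelu'](x, torch.tensor(0.1))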
Example #19
 def forward(self, input: torch.Tensor) -> torch.Tensor:
     return F.prelu(input, self.weight)
Example #20
sumproducts = np.matmul(x1_list, x2_list)

#print(sumproducts)

# 2) vectorize_Relu

#x = np.array([x1_list, x2_list])

x = torch.randn(np.random.randint(3, 10), np.random.randint(3, 10),
                np.random.randint(3, 10))
y = F.relu(x)

# 3) vectorize_Prelu
a = torch.tensor(0.1)
y = F.prelu(x, a)

print(x)
print(x.shape)

# Problem 3
# 1) slice fixed point


def result_slice_fixed_points(df, length, position):
    # Reconstructed from the commented-out assignment: take the same
    # fixed-position slice of every row (assumes df is a 2-D tensor).
    result = torch.zeros(df.shape[0], length)
    for i in range(df.shape[0]):
        result[i] = df[i, position:position + length]
    return result
Example #21
 def forward(self, input):
     with torch.no_grad():
         self.weight.clamp_(0, 0.5)  # clamp() returns a new tensor; the original discarded the result
     return F.prelu(input, self.weight) / torch.sqrt(1. + self.weight**2)
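The division by torch.sqrt(1. + self.weight**2) in Example #21 keeps the output scale independent of the learned slope: for a standard normal input, E[prelu(x, w)^2] = (1 + w^2)/2, so the normalized second moment stays at 1/2 for any w. A quick empirical check:

import torch
import torch.nn.functional as F

x = torch.randn(1_000_000)
for w in (torch.tensor(0.0), torch.tensor(0.3), torch.tensor(0.5)):
    y = F.prelu(x, w) / torch.sqrt(1. + w ** 2)
    print(w.item(), y.pow(2).mean().item())  # each prints roughly 0.5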
Example #22
 def PRELU_2d(self, x, alph):
     y = F.prelu(x, alph)
     return y
Example #23
 def forward(self, x):
     y = F.prelu(x, torch.exp(self.alphas))
     return y
Example #24
 def forward(self, x):
     self.minusscalar0_second = torch.autograd.Variable(torch.from_numpy(
         _weights_dict['minusscalar0_second']['value']),
                                                        requires_grad=False)
     self.mulscalar0_second = torch.autograd.Variable(torch.from_numpy(
         _weights_dict['mulscalar0_second']['value']),
                                                      requires_grad=False)
     minusscalar0 = x - self.minusscalar0_second
     mulscalar0 = minusscalar0 * self.mulscalar0_second
     conv_1_conv2d_pad = F.pad(mulscalar0, (1, 1, 1, 1))
     conv_1_conv2d = self.conv_1_conv2d(conv_1_conv2d_pad)
     conv_1_batchnorm = self.conv_1_batchnorm(conv_1_conv2d)
     conv_1_relu = F.prelu(
         conv_1_batchnorm,
         torch.from_numpy(_weights_dict['conv_1_relu']['weights']))
     conv_2_dw_conv2d_pad = F.pad(conv_1_relu, (1, 1, 1, 1))
     conv_2_dw_conv2d = self.conv_2_dw_conv2d(conv_2_dw_conv2d_pad)
     conv_2_dw_batchnorm = self.conv_2_dw_batchnorm(conv_2_dw_conv2d)
     conv_2_dw_relu = F.prelu(
         conv_2_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_2_dw_relu']['weights']))
     conv_2_conv2d = self.conv_2_conv2d(conv_2_dw_relu)
     conv_2_batchnorm = self.conv_2_batchnorm(conv_2_conv2d)
     conv_2_relu = F.prelu(
         conv_2_batchnorm,
         torch.from_numpy(_weights_dict['conv_2_relu']['weights']))
     conv_3_dw_conv2d_pad = F.pad(conv_2_relu, (1, 1, 1, 1))
     conv_3_dw_conv2d = self.conv_3_dw_conv2d(conv_3_dw_conv2d_pad)
     conv_3_dw_batchnorm = self.conv_3_dw_batchnorm(conv_3_dw_conv2d)
     conv_3_dw_relu = F.prelu(
         conv_3_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_3_dw_relu']['weights']))
     conv_3_conv2d = self.conv_3_conv2d(conv_3_dw_relu)
     conv_3_batchnorm = self.conv_3_batchnorm(conv_3_conv2d)
     conv_3_relu = F.prelu(
         conv_3_batchnorm,
         torch.from_numpy(_weights_dict['conv_3_relu']['weights']))
     conv_4_dw_conv2d_pad = F.pad(conv_3_relu, (1, 1, 1, 1))
     conv_4_dw_conv2d = self.conv_4_dw_conv2d(conv_4_dw_conv2d_pad)
     conv_4_dw_batchnorm = self.conv_4_dw_batchnorm(conv_4_dw_conv2d)
     conv_4_dw_relu = F.prelu(
         conv_4_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_4_dw_relu']['weights']))
     conv_4_conv2d = self.conv_4_conv2d(conv_4_dw_relu)
     conv_4_batchnorm = self.conv_4_batchnorm(conv_4_conv2d)
     conv_4_relu = F.prelu(
         conv_4_batchnorm,
         torch.from_numpy(_weights_dict['conv_4_relu']['weights']))
     conv_5_dw_conv2d_pad = F.pad(conv_4_relu, (1, 1, 1, 1))
     conv_5_dw_conv2d = self.conv_5_dw_conv2d(conv_5_dw_conv2d_pad)
     conv_5_dw_batchnorm = self.conv_5_dw_batchnorm(conv_5_dw_conv2d)
     conv_5_dw_relu = F.prelu(
         conv_5_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_5_dw_relu']['weights']))
     conv_5_conv2d = self.conv_5_conv2d(conv_5_dw_relu)
     conv_5_batchnorm = self.conv_5_batchnorm(conv_5_conv2d)
     conv_5_relu = F.prelu(
         conv_5_batchnorm,
         torch.from_numpy(_weights_dict['conv_5_relu']['weights']))
     conv_6_dw_conv2d_pad = F.pad(conv_5_relu, (1, 1, 1, 1))
     conv_6_dw_conv2d = self.conv_6_dw_conv2d(conv_6_dw_conv2d_pad)
     conv_6_dw_batchnorm = self.conv_6_dw_batchnorm(conv_6_dw_conv2d)
     conv_6_dw_relu = F.prelu(
         conv_6_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_6_dw_relu']['weights']))
     conv_6_conv2d = self.conv_6_conv2d(conv_6_dw_relu)
     conv_6_batchnorm = self.conv_6_batchnorm(conv_6_conv2d)
     conv_6_relu = F.prelu(
         conv_6_batchnorm,
         torch.from_numpy(_weights_dict['conv_6_relu']['weights']))
     conv_7_dw_conv2d_pad = F.pad(conv_6_relu, (1, 1, 1, 1))
     conv_7_dw_conv2d = self.conv_7_dw_conv2d(conv_7_dw_conv2d_pad)
     conv_7_dw_batchnorm = self.conv_7_dw_batchnorm(conv_7_dw_conv2d)
     conv_7_dw_relu = F.prelu(
         conv_7_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_7_dw_relu']['weights']))
     conv_7_conv2d = self.conv_7_conv2d(conv_7_dw_relu)
     conv_7_batchnorm = self.conv_7_batchnorm(conv_7_conv2d)
     conv_7_relu = F.prelu(
         conv_7_batchnorm,
         torch.from_numpy(_weights_dict['conv_7_relu']['weights']))
     conv_8_dw_conv2d_pad = F.pad(conv_7_relu, (1, 1, 1, 1))
     conv_8_dw_conv2d = self.conv_8_dw_conv2d(conv_8_dw_conv2d_pad)
     conv_8_dw_batchnorm = self.conv_8_dw_batchnorm(conv_8_dw_conv2d)
     conv_8_dw_relu = F.prelu(
         conv_8_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_8_dw_relu']['weights']))
     conv_8_conv2d = self.conv_8_conv2d(conv_8_dw_relu)
     conv_8_batchnorm = self.conv_8_batchnorm(conv_8_conv2d)
     conv_8_relu = F.prelu(
         conv_8_batchnorm,
         torch.from_numpy(_weights_dict['conv_8_relu']['weights']))
     conv_9_dw_conv2d_pad = F.pad(conv_8_relu, (1, 1, 1, 1))
     conv_9_dw_conv2d = self.conv_9_dw_conv2d(conv_9_dw_conv2d_pad)
     conv_9_dw_batchnorm = self.conv_9_dw_batchnorm(conv_9_dw_conv2d)
     conv_9_dw_relu = F.prelu(
         conv_9_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_9_dw_relu']['weights']))
     conv_9_conv2d = self.conv_9_conv2d(conv_9_dw_relu)
     conv_9_batchnorm = self.conv_9_batchnorm(conv_9_conv2d)
     conv_9_relu = F.prelu(
         conv_9_batchnorm,
         torch.from_numpy(_weights_dict['conv_9_relu']['weights']))
     conv_10_dw_conv2d_pad = F.pad(conv_9_relu, (1, 1, 1, 1))
     conv_10_dw_conv2d = self.conv_10_dw_conv2d(conv_10_dw_conv2d_pad)
     conv_10_dw_batchnorm = self.conv_10_dw_batchnorm(conv_10_dw_conv2d)
     conv_10_dw_relu = F.prelu(
         conv_10_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_10_dw_relu']['weights']))
     conv_10_conv2d = self.conv_10_conv2d(conv_10_dw_relu)
     conv_10_batchnorm = self.conv_10_batchnorm(conv_10_conv2d)
     conv_10_relu = F.prelu(
         conv_10_batchnorm,
         torch.from_numpy(_weights_dict['conv_10_relu']['weights']))
     conv_11_dw_conv2d_pad = F.pad(conv_10_relu, (1, 1, 1, 1))
     conv_11_dw_conv2d = self.conv_11_dw_conv2d(conv_11_dw_conv2d_pad)
     conv_11_dw_batchnorm = self.conv_11_dw_batchnorm(conv_11_dw_conv2d)
     conv_11_dw_relu = F.prelu(
         conv_11_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_11_dw_relu']['weights']))
     conv_11_conv2d = self.conv_11_conv2d(conv_11_dw_relu)
     conv_11_batchnorm = self.conv_11_batchnorm(conv_11_conv2d)
     conv_11_relu = F.prelu(
         conv_11_batchnorm,
         torch.from_numpy(_weights_dict['conv_11_relu']['weights']))
     conv_12_dw_conv2d_pad = F.pad(conv_11_relu, (1, 1, 1, 1))
     conv_12_dw_conv2d = self.conv_12_dw_conv2d(conv_12_dw_conv2d_pad)
     conv_12_dw_batchnorm = self.conv_12_dw_batchnorm(conv_12_dw_conv2d)
     conv_12_dw_relu = F.prelu(
         conv_12_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_12_dw_relu']['weights']))
     conv_12_conv2d = self.conv_12_conv2d(conv_12_dw_relu)
     conv_12_batchnorm = self.conv_12_batchnorm(conv_12_conv2d)
     conv_12_relu = F.prelu(
         conv_12_batchnorm,
         torch.from_numpy(_weights_dict['conv_12_relu']['weights']))
     conv_13_dw_conv2d_pad = F.pad(conv_12_relu, (1, 1, 1, 1))
     conv_13_dw_conv2d = self.conv_13_dw_conv2d(conv_13_dw_conv2d_pad)
     conv_13_dw_batchnorm = self.conv_13_dw_batchnorm(conv_13_dw_conv2d)
     conv_13_dw_relu = F.prelu(
         conv_13_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_13_dw_relu']['weights']))
     conv_13_conv2d = self.conv_13_conv2d(conv_13_dw_relu)
     conv_13_batchnorm = self.conv_13_batchnorm(conv_13_conv2d)
     conv_13_relu = F.prelu(
         conv_13_batchnorm,
         torch.from_numpy(_weights_dict['conv_13_relu']['weights']))
     conv_14_dw_conv2d_pad = F.pad(conv_13_relu, (1, 1, 1, 1))
     conv_14_dw_conv2d = self.conv_14_dw_conv2d(conv_14_dw_conv2d_pad)
     conv_14_dw_batchnorm = self.conv_14_dw_batchnorm(conv_14_dw_conv2d)
     conv_14_dw_relu = F.prelu(
         conv_14_dw_batchnorm,
         torch.from_numpy(_weights_dict['conv_14_dw_relu']['weights']))
     conv_14_conv2d = self.conv_14_conv2d(conv_14_dw_relu)
     conv_14_batchnorm = self.conv_14_batchnorm(conv_14_conv2d)
     conv_14_relu = F.prelu(
         conv_14_batchnorm,
         torch.from_numpy(_weights_dict['conv_14_relu']['weights']))
     conv_15_conv2d_pad = F.pad(conv_14_relu, (1, 1, 1, 1))
     conv_15_conv2d = self.conv_15_conv2d(conv_15_conv2d_pad)
     conv_15_batchnorm = self.conv_15_batchnorm(conv_15_conv2d)
     conv_15_relu = F.prelu(
         conv_15_batchnorm,
         torch.from_numpy(_weights_dict['conv_15_relu']['weights']))
     flatten0 = conv_15_relu.view(conv_15_relu.size(0), -1)
     fc1 = self.fc1(flatten0)
     return fc1
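Example #24 repeats the same depthwise-then-pointwise unit (pad, conv, batchnorm, PReLU with weights pulled from _weights_dict) fourteen times. A sketch of how one such unit could be factored into a helper, assuming the module and key naming shown above:

import torch
import torch.nn.functional as F

def dw_pw_unit(x, dw_conv, dw_bn, pw_conv, pw_bn, dw_key, pw_key, weights_dict):
    # Depthwise half: pad -> conv -> batchnorm -> PReLU with loaded weights.
    x = F.pad(x, (1, 1, 1, 1))
    x = F.prelu(dw_bn(dw_conv(x)), torch.from_numpy(weights_dict[dw_key]['weights']))
    # Pointwise half: conv -> batchnorm -> PReLU.
    x = F.prelu(pw_bn(pw_conv(x)), torch.from_numpy(weights_dict[pw_key]['weights']))
    return x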
Example #25
 def forward(self, input):
     weight = torch.reshape(self.weight,
                            [1, self.n_channels, 1, self.n_features])
     return F.prelu(input, weight)
Example #26
 def forward(self, input):
     return (F.prelu(input, self.weight) - self.post_mean) / self.post_stdv
Example #27
File: prelu_test.py  Project: Bensonlp/glow
 def prelu_basic(inputs, weight):
     return F.prelu(inputs+inputs, weight)
Example #28
def prelu(input, *args, **kwargs):
    return _wrap_tensor(input, F.prelu(input.F, *args, **kwargs))
Example #29
 def forward(self, x):
     y = self.conv(x)  # `block.conv` in the original referenced an undefined name; self.conv is assumed here
     y = F.prelu(self.bn1(self.conv1(y)), self.weight)
     y = F.selu(self.bn2(self.conv2(y)))
     return x + y
Example #30
 def test_prelu(self):
     inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
     weight = torch.randn(1, device='cuda', dtype=self.dtype)
     output = F.prelu(inp, weight)
Example #31
 def forward(self, x):
     stem_conv1_pad = F.pad(x, (1, 1, 1, 1))
     stem_conv1 = self.stem_conv1(stem_conv1_pad)
     stem_bn2 = self.stem_bn2(stem_conv1)
     stem_relu1 = F.prelu(
         stem_bn2,
         torch.from_numpy(self.weights_dict['stem_relu1']['weights']))
     stage1_unit1_bn1 = self.stage1_unit1_bn1(stem_relu1)
     stage1_unit1_convr = self.stage1_unit1_convr(stem_relu1)
     stage1_unit1_conv1_pad = F.pad(stage1_unit1_bn1, (1, 1, 1, 1))
     stage1_unit1_conv1 = self.stage1_unit1_conv1(stage1_unit1_conv1_pad)
     stage1_unit1_bnr = self.stage1_unit1_bnr(stage1_unit1_convr)
     stage1_unit1_bn2 = self.stage1_unit1_bn2(stage1_unit1_conv1)
     stage1_unit1_prelu2 = F.prelu(
         stage1_unit1_bn2,
         torch.from_numpy(
             self.weights_dict['stage1_unit1_prelu2']['weights']))
     stage1_unit1_conv2_pad = F.pad(stage1_unit1_prelu2, (1, 1, 1, 1))
     stage1_unit1_conv2 = self.stage1_unit1_conv2(stage1_unit1_conv2_pad)
     stage1_unit1_bn4 = self.stage1_unit1_bn4(stage1_unit1_conv2)
     plus0 = stage1_unit1_bn4 + stage1_unit1_bnr
     stage2_unit1_bn1 = self.stage2_unit1_bn1(plus0)
     stage2_unit1_convr = self.stage2_unit1_convr(plus0)
     stage2_unit1_conv1_pad = F.pad(stage2_unit1_bn1, (1, 1, 1, 1))
     stage2_unit1_conv1 = self.stage2_unit1_conv1(stage2_unit1_conv1_pad)
     stage2_unit1_bnr = self.stage2_unit1_bnr(stage2_unit1_convr)
     stage2_unit1_bn2 = self.stage2_unit1_bn2(stage2_unit1_conv1)
     stage2_unit1_prelu2 = F.prelu(
         stage2_unit1_bn2,
         torch.from_numpy(
             self.weights_dict['stage2_unit1_prelu2']['weights']))
     stage2_unit1_conv2_pad = F.pad(stage2_unit1_prelu2, (1, 1, 1, 1))
     stage2_unit1_conv2 = self.stage2_unit1_conv2(stage2_unit1_conv2_pad)
     stage2_unit1_bn4 = self.stage2_unit1_bn4(stage2_unit1_conv2)
     plus1 = stage2_unit1_bn4 + stage2_unit1_bnr
     stage2_unit2_bn1 = self.stage2_unit2_bn1(plus1)
     stage2_unit2_conv1_pad = F.pad(stage2_unit2_bn1, (1, 1, 1, 1))
     stage2_unit2_conv1 = self.stage2_unit2_conv1(stage2_unit2_conv1_pad)
     stage2_unit2_bn2 = self.stage2_unit2_bn2(stage2_unit2_conv1)
     stage2_unit2_prelu2 = F.prelu(
         stage2_unit2_bn2,
         torch.from_numpy(
             self.weights_dict['stage2_unit2_prelu2']['weights']))
     stage2_unit2_conv2_pad = F.pad(stage2_unit2_prelu2, (1, 1, 1, 1))
     stage2_unit2_conv2 = self.stage2_unit2_conv2(stage2_unit2_conv2_pad)
     stage2_unit2_bn4 = self.stage2_unit2_bn4(stage2_unit2_conv2)
     plus2 = stage2_unit2_bn4 + plus1
     stage3_unit1_bn1 = self.stage3_unit1_bn1(plus2)
     stage3_unit1_convr = self.stage3_unit1_convr(plus2)
     stage3_unit1_conv1_pad = F.pad(stage3_unit1_bn1, (1, 1, 1, 1))
     stage3_unit1_conv1 = self.stage3_unit1_conv1(stage3_unit1_conv1_pad)
     stage3_unit1_bnr = self.stage3_unit1_bnr(stage3_unit1_convr)
     stage3_unit1_bn2 = self.stage3_unit1_bn2(stage3_unit1_conv1)
     stage3_unit1_prelu2 = F.prelu(
         stage3_unit1_bn2,
         torch.from_numpy(
             self.weights_dict['stage3_unit1_prelu2']['weights']))
     stage3_unit1_conv2_pad = F.pad(stage3_unit1_prelu2, (1, 1, 1, 1))
     stage3_unit1_conv2 = self.stage3_unit1_conv2(stage3_unit1_conv2_pad)
     stage3_unit1_bn4 = self.stage3_unit1_bn4(stage3_unit1_conv2)
     plus3 = stage3_unit1_bn4 + stage3_unit1_bnr
     stage3_unit2_bn1 = self.stage3_unit2_bn1(plus3)
     stage3_unit2_conv1_pad = F.pad(stage3_unit2_bn1, (1, 1, 1, 1))
     stage3_unit2_conv1 = self.stage3_unit2_conv1(stage3_unit2_conv1_pad)
     stage3_unit2_bn2 = self.stage3_unit2_bn2(stage3_unit2_conv1)
     stage3_unit2_prelu2 = F.prelu(
         stage3_unit2_bn2,
         torch.from_numpy(
             self.weights_dict['stage3_unit2_prelu2']['weights']))
     stage3_unit2_conv2_pad = F.pad(stage3_unit2_prelu2, (1, 1, 1, 1))
     stage3_unit2_conv2 = self.stage3_unit2_conv2(stage3_unit2_conv2_pad)
     stage3_unit2_bn4 = self.stage3_unit2_bn4(stage3_unit2_conv2)
     plus4 = stage3_unit2_bn4 + plus3
     stage3_unit3_bn1 = self.stage3_unit3_bn1(plus4)
     stage3_unit3_conv1_pad = F.pad(stage3_unit3_bn1, (1, 1, 1, 1))
     stage3_unit3_conv1 = self.stage3_unit3_conv1(stage3_unit3_conv1_pad)
     stage3_unit3_bn2 = self.stage3_unit3_bn2(stage3_unit3_conv1)
     stage3_unit3_prelu2 = F.prelu(
         stage3_unit3_bn2,
         torch.from_numpy(
             self.weights_dict['stage3_unit3_prelu2']['weights']))
     stage3_unit3_conv2_pad = F.pad(stage3_unit3_prelu2, (1, 1, 1, 1))
     stage3_unit3_conv2 = self.stage3_unit3_conv2(stage3_unit3_conv2_pad)
     stage3_unit3_bn4 = self.stage3_unit3_bn4(stage3_unit3_conv2)
     plus5 = stage3_unit3_bn4 + plus4
     stage3_unit4_bn1 = self.stage3_unit4_bn1(plus5)
     stage3_unit4_conv1_pad = F.pad(stage3_unit4_bn1, (1, 1, 1, 1))
     stage3_unit4_conv1 = self.stage3_unit4_conv1(stage3_unit4_conv1_pad)
     stage3_unit4_bn2 = self.stage3_unit4_bn2(stage3_unit4_conv1)
     stage3_unit4_prelu2 = F.prelu(
         stage3_unit4_bn2,
         torch.from_numpy(
             self.weights_dict['stage3_unit4_prelu2']['weights']))
     stage3_unit4_conv2_pad = F.pad(stage3_unit4_prelu2, (1, 1, 1, 1))
     stage3_unit4_conv2 = self.stage3_unit4_conv2(stage3_unit4_conv2_pad)
     stage3_unit4_bn4 = self.stage3_unit4_bn4(stage3_unit4_conv2)
     plus6 = stage3_unit4_bn4 + plus5
     stage3_unit5_bn1 = self.stage3_unit5_bn1(plus6)
     stage3_unit5_conv1_pad = F.pad(stage3_unit5_bn1, (1, 1, 1, 1))
     stage3_unit5_conv1 = self.stage3_unit5_conv1(stage3_unit5_conv1_pad)
     stage3_unit5_bn2 = self.stage3_unit5_bn2(stage3_unit5_conv1)
     stage3_unit5_prelu2 = F.prelu(
         stage3_unit5_bn2,
         torch.from_numpy(
             self.weights_dict['stage3_unit5_prelu2']['weights']))
     stage3_unit5_conv2_pad = F.pad(stage3_unit5_prelu2, (1, 1, 1, 1))
     stage3_unit5_conv2 = self.stage3_unit5_conv2(stage3_unit5_conv2_pad)
     stage3_unit5_bn4 = self.stage3_unit5_bn4(stage3_unit5_conv2)
     plus7 = stage3_unit5_bn4 + plus6
     stage4_unit1_bn1 = self.stage4_unit1_bn1(plus7)
     stage4_unit1_convr = self.stage4_unit1_convr(plus7)
     stage4_unit1_conv1_pad = F.pad(stage4_unit1_bn1, (1, 1, 1, 1))
     stage4_unit1_conv1 = self.stage4_unit1_conv1(stage4_unit1_conv1_pad)
     stage4_unit1_bnr = self.stage4_unit1_bnr(stage4_unit1_convr)
     stage4_unit1_bn2 = self.stage4_unit1_bn2(stage4_unit1_conv1)
     stage4_unit1_prelu2 = F.prelu(
         stage4_unit1_bn2,
         torch.from_numpy(
             self.weights_dict['stage4_unit1_prelu2']['weights']))
     stage4_unit1_conv2_pad = F.pad(stage4_unit1_prelu2, (1, 1, 1, 1))
     stage4_unit1_conv2 = self.stage4_unit1_conv2(stage4_unit1_conv2_pad)
     stage4_unit1_bn4 = self.stage4_unit1_bn4(stage4_unit1_conv2)
     plus8 = stage4_unit1_bn4 + stage4_unit1_bnr
     stage4_unit2_bn1 = self.stage4_unit2_bn1(plus8)
     stage4_unit2_conv1_pad = F.pad(stage4_unit2_bn1, (1, 1, 1, 1))
     stage4_unit2_conv1 = self.stage4_unit2_conv1(stage4_unit2_conv1_pad)
     stage4_unit2_bn2 = self.stage4_unit2_bn2(stage4_unit2_conv1)
     stage4_unit2_prelu2 = F.prelu(
         stage4_unit2_bn2,
         torch.from_numpy(
             self.weights_dict['stage4_unit2_prelu2']['weights']))
     stage4_unit2_conv2_pad = F.pad(stage4_unit2_prelu2, (1, 1, 1, 1))
     stage4_unit2_conv2 = self.stage4_unit2_conv2(stage4_unit2_conv2_pad)
     stage4_unit2_bn4 = self.stage4_unit2_bn4(stage4_unit2_conv2)
     plus9 = stage4_unit2_bn4 + plus8
     out_bn2 = self.out_bn2(plus9)
     out_relu2 = F.prelu(
         out_bn2,
         torch.from_numpy(self.weights_dict['out_relu2']['weights']))
     out_conv1_pad = F.pad(out_relu2, (1, 1, 1, 1))
     out_conv1 = self.out_conv1(out_conv1_pad)
     out_bn3 = self.out_bn3(out_conv1)
     out_relu3 = F.relu(out_bn3)
     out_fc1 = self.out_fc1(out_relu3.view(out_relu3.size(0), -1))
     out_embedding = self.out_embedding(out_fc1)
     return out_embedding