def forward(self, x):
    """GoogLeNet-style forward pass.

    Returns the main classifier output; during training additionally
    returns the two auxiliary classifier outputs as (main, aux1, aux2).
    """
    out = self.conv_1(x)
    out = self.maxpool_1(out)
    out = F.local_response_norm(out, size=5)
    out = self.conv_2_1(out)
    out = self.conv_2_2(out)
    out = F.local_response_norm(out, size=5)
    out = self.maxpool_2(out)
    out = self.inception_3a(out)
    out = self.inception_3b(out)
    out = self.maxpool_3(out)
    out = self.inception_4a(out)
    if self.training:
        # first auxiliary head, fed from inception 4a
        aux1 = self.avgpool_4a(out)
        aux1 = self.auxiliary_classifier_1(aux1)
    out = self.inception_4b(out)
    out = self.inception_4c(out)
    out = self.inception_4d(out)
    if self.training:
        # second auxiliary head, fed from inception 4d
        aux2 = self.avgpool_4d(out)
        aux2 = self.auxiliary_classifier_2(aux2)
    out = self.inception_4e(out)
    out = self.maxpool_4(out)
    out = self.inception_5a(out)
    out = self.inception_5b(out)
    out = self.classifier(out)
    if self.training:
        return out, aux1, aux2
    return out
def forward(self, x):
    """Five conv layers + spatial pyramid pooling + three FC layers.

    :param x: input batch of shape (N, C, H, W)
    :return: classifier output
    """
    out = self.pool1(F.local_response_norm(F.relu(self.conv1(x)), size=4))
    out = self.pool2(F.local_response_norm(F.relu(self.conv2(out)), size=4))
    out = F.relu(self.conv3(out))
    out = F.relu(self.conv4(out))
    out = F.relu(self.conv5(out))
    # Pool the last feature map into a fixed-length vector regardless of
    # input spatial size; self.output_num holds the pyramid levels (e.g. [4, 2, 1]).
    pooled = spatial_pyramid_pooling(
        out,                                    # output of the previous layer
        out.size(0),                            # batch size N
        [int(out.size(2)), int(out.size(3))],   # feature map H, W
        self.output_num)
    hidden = F.relu(self.fc1(pooled))
    hidden = F.relu(self.fc2(hidden))
    return self.out(hidden)
def forward(self, x):
    """AlexNet-style forward pass.

    :param x: input batch (N, C, H, W)
    :return: class probabilities of shape (N, num_classes)
    """
    # Layer 1: conv -> relu -> max pool -> LRN
    x = F.local_response_norm(F.max_pool2d(F.relu(self.conv1(x)), kernel_size=3, stride=2),
                              4, alpha=0.001 / 9.0, beta=0.75)
    # Layer 2
    x = F.local_response_norm(F.max_pool2d(F.relu(self.conv2(x)), kernel_size=3, stride=2),
                              4, alpha=0.001 / 9.0, beta=0.75)
    # Layers 3-5
    x = F.relu(self.conv3(x))
    x = F.relu(self.conv4(x))
    x = F.max_pool2d(F.relu(self.conv5(x)), (3, 3), stride=2)
    # Flatten to (N, features)
    x = x.view(x.size(0), -1)
    # Layers 6-7.  BUG FIX: pass training=self.training so dropout is disabled
    # in eval mode (F.dropout defaults to training=True).
    x = F.dropout(F.relu(self.fc1(x)), p=0.5, training=self.training)
    x = F.dropout(F.relu(self.fc2(x)), p=0.5, training=self.training)
    # Layer 8.  BUG FIX: explicit dim=1 avoids the deprecated implicit-dim
    # softmax; also removed a leftover debug print of the output size.
    x = F.softmax(self.fc3(x), dim=1)
    return x
def forward(self, x):
    """AlexNet-style forward pass (Caffe-converted asymmetric padding).

    :param x: input batch (N, C, H, W)
    :return: softmax class probabilities of shape (N, num_classes)
    """
    conv1_pad = F.pad(x, (0, 1, 0, 1))
    relu1 = F.relu(self.conv1(conv1_pad))
    # pad with -inf so the extra border never wins the max pool
    pool1_pad = F.pad(relu1, (0, 1, 0, 1), value=float('-inf'))
    pool1 = F.max_pool2d(pool1_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    norm1 = F.local_response_norm(pool1, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    conv2_pad = F.pad(norm1, (2, 2, 2, 2))
    relu2 = F.relu(self.conv2(conv2_pad))
    pool2_pad = F.pad(relu2, (0, 1, 0, 1), value=float('-inf'))
    pool2 = F.max_pool2d(pool2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    norm2 = F.local_response_norm(pool2, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    conv3_pad = F.pad(norm2, (1, 1, 1, 1))
    relu3 = F.relu(self.conv3(conv3_pad))
    pool5_pad = F.pad(relu3, (0, 1, 0, 1), value=float('-inf'))
    pool5 = F.max_pool2d(pool5_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # fully-connected head with dropout (already gated on self.training)
    fc6 = self.fc6_1(pool5.view(pool5.size(0), -1))
    drop6 = F.dropout(input=F.relu(fc6), p=0.5, training=self.training, inplace=True)
    fc7 = self.fc7_1(drop6.view(drop6.size(0), -1))
    drop7 = F.dropout(input=F.relu(fc7), p=0.5, training=self.training, inplace=True)
    fc8 = self.fc8_1(drop7.view(drop7.size(0), -1))
    # BUG FIX: explicit dim=1; implicit-dim softmax is deprecated and ambiguous.
    prob = F.softmax(fc8, dim=1)
    return prob
def forward(self, x):
    """AlexNet-like forward pass: five conv stages with LRN after the first
    two, followed by three fully-connected layers."""
    out = self.c1(x)
    out = self.c2(F.local_response_norm(out, 5))
    out = self.c3(F.local_response_norm(out, 5))
    out = self.c4(out)
    out = self.c5(out)
    # flatten to the fixed 9216-feature vector expected by fc6
    flat = out.view(-1, 9216)
    return self.fc8(self.fc7(self.fc6(flat)))
def forward(self, x, training=False):
    """AlexNet forward pass.

    :param x: input batch (N, C, H, W)
    :param training: when True, applies dropout on the fc6/fc7 outputs
    :return: fc8 logits
    """
    out = F.max_pool2d(F.local_response_norm(F.relu(self.conv1(x)), 5, k=2), 3, stride=2)
    out = F.max_pool2d(F.local_response_norm(F.relu(self.conv2(out)), 5, k=2), 3, stride=2)
    out = F.relu(self.conv3(out))
    out = F.relu(self.conv4(out))
    out = F.max_pool2d(F.relu(self.conv5(out)), 3, stride=2)
    # flatten all non-batch dimensions
    out = out.view(-1, self.num_flat_features(out))
    out = F.relu(F.dropout(self.fc6(out), training=training))
    out = F.relu(F.dropout(self.fc7(out), training=training))
    return self.fc8(out)
def __init__(self, in_channels, out_channels, use_eql):
    """
    constructor for the class
    :param in_channels: number of input channels to the block
    :param out_channels: number of output channels required
    :param use_eql: whether to use equalized learning rate
    """
    from torch.nn import Conv2d, LeakyReLU, Upsample
    from torch.nn.functional import local_response_norm

    super(GenGeneralConvBlock, self).__init__()

    self.upsample = Upsample(scale_factor=2)

    if use_eql:
        # equalized-learning-rate convolutions
        self.conv_1 = _equalized_conv2d(in_channels, out_channels, (3, 3),
                                        pad=1, bias=True)
        self.conv_2 = _equalized_conv2d(out_channels, out_channels, (3, 3),
                                        pad=1, bias=True)
    else:
        self.conv_1 = Conv2d(in_channels, out_channels, (3, 3),
                             padding=1, bias=True)
        self.conv_2 = Conv2d(out_channels, out_channels, (3, 3),
                             padding=1, bias=True)

    # Pixelwise feature-vector normalization, expressed via an LRN window
    # spanning all channels (alpha=2, beta=0.5, tiny k).
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # leaky_relu activation
    self.lrelu = LeakyReLU(0.2)
def __init__(self, in_channels, use_eql):
    """
    constructor for the inner class
    :param in_channels: number of input channels to the block
    :param use_eql: whether to use equalized learning rate
    """
    from torch.nn import LeakyReLU
    from torch.nn.functional import local_response_norm

    super(GenInitialBlock, self).__init__()

    if use_eql:
        # equalized-learning-rate deconv + conv
        self.conv_1 = _equalized_deconv2d(in_channels, in_channels, (4, 4), bias=True)
        self.conv_2 = _equalized_conv2d(in_channels, in_channels, (3, 3), pad=1, bias=True)
    else:
        from torch.nn import Conv2d, ConvTranspose2d
        self.conv_1 = ConvTranspose2d(in_channels, in_channels, (4, 4), bias=True)
        self.conv_2 = Conv2d(in_channels, in_channels, (3, 3), padding=1, bias=True)

    # Pixelwise feature-vector normalization via an LRN window spanning
    # all channels (alpha=2, beta=0.5, tiny k).
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # leaky_relu activation
    self.lrelu = LeakyReLU(0.2)
def forward(self, x):
    """
    Forward propagates the network given an input batch
    :param x: Inputs x (b, c, h, w)
    :return: preds (b, num_classes)
    """
    out = F.relu(self.layer_dict['conv_1'](x))
    out = F.local_response_norm(out, size=5, alpha=0.0001, beta=0.75, k=2.0)
    out = F.max_pool2d(out, kernel_size=3, stride=2)

    out = F.relu(self.layer_dict['conv_2'](out))
    if self.is_spatial:
        # the second LRN is only applied in the spatial configuration
        out = F.local_response_norm(out, size=5, alpha=0.0001, beta=0.75, k=2.0)
    out = F.max_pool2d(out, kernel_size=3, stride=2)

    out = F.relu(self.layer_dict['conv_3'](out))
    out = F.relu(self.layer_dict['conv_4'](out))
    out = F.relu(self.layer_dict['conv_5'](out))
    out = F.max_pool2d(out, kernel_size=3, stride=2)

    # flatten from (b, c, h, w) to (b, c*h*w)
    out = out.view(out.shape[0], -1)

    for fc_name in ('fc_1', 'fc_2'):
        out = F.relu(self.layer_dict[fc_name](out))
        out = F.dropout(out, p=self.dropout_rate, training=self.training)

    return self.layer_dict['logits'](out)
def forward(self, x):
    """AlexNet-like forward pass.

    :param x: input batch of shape (batch, 3, 224, 224)
    :return: fc3 logits
    """
    batch_size = x.size(0)
    # batch*3*224*224 -> batch*96*109*109
    out = F.relu(self.conv1(x))
    # -> batch*96*54*54
    out = F.max_pool2d(out, 3, 2)
    out = F.local_response_norm(out, 2)
    # -> batch*256*25*25
    out = F.relu(self.conv2(out))
    # -> batch*256*12*12
    out = F.max_pool2d(out, 3, 2)
    out = F.local_response_norm(out, 2)
    # -> batch*512*10*10
    out = F.relu(self.conv3(out))
    # -> batch*512*8*8
    out = F.relu(self.conv4(out))
    # -> batch*512*6*6
    out = F.relu(self.conv5(out))
    # -> batch*512*2*2
    out = F.max_pool2d(out, 3, 2)
    # flatten before the classifier head
    out = out.view(batch_size, -1)
    out = self.fc1(out)
    # BUG FIX: F.dropout defaults to training=True, which kept dropout active
    # during evaluation; gate it on the module's training flag instead.
    out = F.dropout(out, training=self.training)
    out = self.fc2(out)
    out = F.dropout(out, training=self.training)
    return self.fc3(out)
def __init__(self, noise_size=512):
    """
    constructor of the class
    :param noise_size: dimensionality of the input prior Z
    """
    super(Generator, self).__init__()

    from torch.nn import Conv2d, ConvTranspose2d, LeakyReLU, Upsample
    from torch.nn.functional import local_response_norm

    self.z_size = noise_size
    ch = self.z_size

    # Layer 1: project the latent to a 4x4 map, then refine it
    self.conv_1_1 = ConvTranspose2d(ch, ch, (4, 4))
    self.conv_1_2 = Conv2d(ch, ch, (3, 3), padding=1)

    # Layers 2-4: channel count stays constant
    self.conv_2_1 = Conv2d(ch, ch, (3, 3), padding=1)
    self.conv_2_2 = Conv2d(ch, ch, (3, 3), padding=1)
    self.conv_3_1 = Conv2d(ch, ch, (3, 3), padding=1)
    self.conv_3_2 = Conv2d(ch, ch, (3, 3), padding=1)
    self.conv_4_1 = Conv2d(ch, ch, (3, 3), padding=1)
    self.conv_4_2 = Conv2d(ch, ch, (3, 3), padding=1)

    # Layers 5-6: halve the channels at each stage
    self.conv_5_1 = Conv2d(ch, ch // 2, (3, 3), padding=1)
    self.conv_5_2 = Conv2d(ch // 2, ch // 2, (3, 3), padding=1)
    self.conv_6_1 = Conv2d(ch // 2, ch // 4, (3, 3), padding=1)
    self.conv_6_2 = Conv2d(ch // 4, ch // 4, (3, 3), padding=1)

    # upsampler shared by all stages
    self.upsample = Upsample(scale_factor=2)

    # 1x1 conv converting the final feature map to RGB
    self.ToRGB = Conv2d(ch // 4, 3, (1, 1), bias=False)

    # pixelwise feature-vector normalization via channel-wide LRN
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # Leaky ReLU activation
    self.lrelu = LeakyReLU(negative_slope=0.2)
def forward(self, x):
    """Conv stack followed by spatial pyramid pooling and the FC head."""
    out = self.pool1(F.local_response_norm(F.relu(self.conv1(x)), size=4))
    out = self.pool2(F.local_response_norm(F.relu(self.conv2(out)), size=4))
    out = F.relu(self.conv3(out))
    out = F.relu(self.conv4(out))
    out = F.relu(self.conv5(out))
    # fixed-length representation regardless of input spatial size
    pooled = spatial_pyramid_pool(out,
                                  out.size(0),
                                  [int(out.size(2)), int(out.size(3))],
                                  self.output_num)
    hidden = F.relu(self.fc2(F.relu(self.fc1(pooled))))
    return self.out(hidden)
def __init__(self, in_channels):
    from torch.nn import Conv2d, ConvTranspose2d, LeakyReLU
    from torch.nn.functional import local_response_norm

    super().__init__()

    # 4x4 transposed conv projects the input to a spatial map,
    # followed by a 3x3 refinement conv.
    self.conv_1 = ConvTranspose2d(in_channels, in_channels, (4, 4))
    self.conv_2 = Conv2d(in_channels, in_channels, (3, 3), padding=1)

    # pixelwise feature-vector normalization via channel-wide LRN
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # leaky_relu activation
    self.lrelu = LeakyReLU(0.2)
def forward(self, x, y, z):
    """Apply two successive LRN passes to each of the three inputs:
    first with default parameters, then with custom alpha/beta/k."""
    a = F.local_response_norm(F.local_response_norm(x, 3),
                              size=5, alpha=0.001, beta=0.8, k=0.9)
    b = F.local_response_norm(F.local_response_norm(y, 4),
                              size=4, alpha=0.01, beta=0.2, k=1.9)
    c = F.local_response_norm(F.local_response_norm(z, 5),
                              size=3, alpha=0.1, beta=0.3, k=0.2)
    return a, b, c
def __init__(self, in_channels, out_channels):
    """
    constructor for the class
    :param in_channels: number of input channels to the block
    :param out_channels: number of output channels required
    """
    from torch.nn import Conv2d, LeakyReLU, Upsample
    from torch.nn.functional import local_response_norm

    super().__init__()

    self.upsample = Upsample(scale_factor=2)
    self.conv_1 = Conv2d(in_channels, out_channels, (3, 3), padding=1)
    self.conv_2 = Conv2d(out_channels, out_channels, (3, 3), padding=1)

    # pixelwise feature-vector normalization via channel-wide LRN
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # leaky_relu activation
    self.lrelu = LeakyReLU(0.2)
def visit_lrn(self, op, network: PyTorchNetwork):
    """Execute an LRN op: fetch its input tensor, apply local response
    normalization with the op's parameters, and feed the result back."""
    inp = network.fetch_tensor_internal(op.i_X)
    out = F.local_response_norm(inp,
                                op.size.value,
                                op.alpha.value,
                                op.beta.value,
                                op.bias.value)
    network.feed_tensor(op.o_Y, out)
def forward(self, x):
    """Two chained LRN passes: default parameters first, then custom alpha/beta/k."""
    first = F.local_response_norm(x, 4)
    return F.local_response_norm(first, size=4, alpha=0.001, beta=0.2, k=1.9)
def forward1(self, x):
    """Compute the GoogLeNet stem through the first inception block (3a).

    The rest of the network (3b through 5b and the classifier) was dead
    commented-out code and has been removed; the live path ends at the
    inception-3a concatenation, which is what this method returns.

    :param x: input batch (N, C, H, W)
    :return: concatenated inception-3a feature map

    Cleanups: removed the huge commented-out dead code, and dropped the
    unused pool indices (return_indices=True results were never used).
    """
    # Stem: 7x7 conv /2 -> max pool /2 -> LRN
    conv1 = F.relu(self.conv1_7x7_s2(F.pad(x, (3, 3, 3, 3))))
    # -inf padding so the asymmetric pad never wins the max
    pool1_pad = F.pad(conv1, (0, 1, 0, 1), value=float('-inf'))
    pool1 = F.max_pool2d(pool1_pad, kernel_size=(3, 3), stride=(2, 2),
                         padding=0, ceil_mode=False)
    norm1 = F.local_response_norm(pool1, size=5, alpha=9.999999747378752e-05,
                                  beta=0.75, k=1.0)

    # Second stage: 1x1 reduce -> 3x3 conv -> LRN -> max pool /2
    reduce2 = F.relu(self.conv2_3x3_reduce(norm1))
    conv2 = F.relu(self.conv2_3x3(F.pad(reduce2, (1, 1, 1, 1))))
    norm2 = F.local_response_norm(conv2, size=5, alpha=9.999999747378752e-05,
                                  beta=0.75, k=1.0)
    pool2_pad = F.pad(norm2, (0, 1, 0, 1), value=float('-inf'))
    pool2 = F.max_pool2d(pool2_pad, kernel_size=(3, 3), stride=(2, 2),
                         padding=0, ceil_mode=False)

    # Inception 3a: four parallel branches over pool2
    branch_1x1 = F.relu(self.inception_3a_1x1(pool2))

    reduce_3x3 = F.relu(self.inception_3a_3x3_reduce(pool2))
    branch_3x3 = F.relu(self.inception_3a_3x3(F.pad(reduce_3x3, (1, 1, 1, 1))))

    reduce_5x5 = F.relu(self.inception_3a_5x5_reduce(pool2))
    branch_5x5 = F.relu(self.inception_3a_5x5(F.pad(reduce_5x5, (2, 2, 2, 2))))

    pool_pad = F.pad(pool2, (1, 1, 1, 1), value=float('-inf'))
    branch_pool = F.max_pool2d(pool_pad, kernel_size=(3, 3), stride=(1, 1),
                               padding=0, ceil_mode=False)
    branch_pool = F.relu(self.inception_3a_pool_proj(branch_pool))

    # concatenate the four branches along the channel dimension
    return torch.cat((branch_1x1, branch_3x3, branch_5x5, branch_pool), 1)
def forward(self, x):
    """Run the parent conv forward pass, optionally followed by LRN."""
    out = super(AlexConv, self).forward(x)
    if not self.use_lrn:
        return out
    return F.local_response_norm(out, size=5, k=2.0)
def __init__(self):
    """ constructor for the class """
    super(Discriminator, self).__init__()

    from torch.nn import AvgPool2d, Conv2d, LeakyReLU
    from torch.nn.functional import local_response_norm

    channels = 3   # RGB input images
    net_ch = 128   # base channel count

    # Layer 1: 1x1 fromRGB-style conv, then two 3x3 convs (second doubles channels)
    self.conv_1_1 = Conv2d(channels, net_ch, (1, 1), bias=False)
    self.conv_1_2 = Conv2d(net_ch, net_ch, (3, 3), padding=1, bias=False)
    self.conv_1_3 = Conv2d(net_ch, 2 * net_ch, (3, 3), padding=1, bias=False)

    # Layer 2: doubles channels again
    self.conv_2_1 = Conv2d(2 * net_ch, 2 * net_ch, (3, 3), padding=1, bias=False)
    self.conv_2_2 = Conv2d(2 * net_ch, 4 * net_ch, (3, 3), padding=1, bias=False)

    # channel count is fixed from here on
    fix_channel = 4 * net_ch

    # Layers 3-4
    self.conv_3_1 = Conv2d(fix_channel, fix_channel, (3, 3), padding=1, bias=False)
    self.conv_3_2 = Conv2d(fix_channel, fix_channel, (3, 3), padding=1, bias=False)
    self.conv_4_1 = Conv2d(fix_channel, fix_channel, (3, 3), padding=1, bias=False)
    self.conv_4_2 = Conv2d(fix_channel, fix_channel, (3, 3), padding=1, bias=False)

    # Layer 6 (final): +1 input channel for the minibatch-stddev feature map
    self.conv_6_1 = Conv2d(fix_channel + 1, fix_channel, (3, 3), padding=1, bias=False)
    self.conv_6_2 = Conv2d(fix_channel, fix_channel, (3, 3), padding=1, bias=False)
    self.conv_6_3 = Conv2d(fix_channel, fix_channel, (4, 4), bias=False)
    self.conv_6_4 = Conv2d(fix_channel, 1, (1, 1), bias=False)

    # average-pool downsampler between stages
    self.downsample = AvgPool2d(2)

    # minibatch standard-deviation layer
    self.min_std = self.MinibatchStdDev()

    # pixelwise feature-vector normalization via channel-wide LRN
    def pixel_norm(t):
        return local_response_norm(t, 2 * t.shape[1], alpha=2, beta=0.5, k=1e-8)
    self.pixNorm = pixel_norm

    # Leaky ReLU activation
    self.lrelu = LeakyReLU(negative_slope=0.2)
def forward(self, x):
    """AlexNet-style feature extractor followed by conv-implemented FC
    layers and a hash layer.

    :param x: input batch (N, C, H, W); the stored mean image is subtracted
    :return: hash codes from the hash layer, squeezed

    BUG FIX: both F.dropout calls now pass training=self.training; the
    default training=True kept dropout active during evaluation.
    """
    def pad_hw(t, pad):
        # pad spec stores (top, bottom, left, right); F.pad wants
        # (left, right, top, bottom) for the last two dims
        dim = [int(pad[2]), int(pad[3]), int(pad[0]), int(pad[1]), 0, 0, 0, 0]
        return F.pad(t, dim, 'constant')

    # mean subtraction (move the mean to the GPU when the input is there)
    x = x - (self.mean.cuda() if x.is_cuda else self.mean)

    # conv1 -> relu -> LRN -> pool1
    x = pad_hw(x, self.pad1)
    x = F.relu(F.conv2d(x, self.conv1_w, self.conv1_b, stride=int(self.stride1[0])))
    x = F.local_response_norm(x, 2, alpha=0.0001, beta=0.75, k=2.000)
    x = pad_hw(x, self.pad_pool1)
    x = F.max_pool2d(x, kernel_size=(int(self.area_pool1[0]), int(self.area_pool1[1])),
                     stride=(int(self.stride_pool1[0]), int(self.stride_pool1[1])))

    # conv2 -> relu -> LRN -> pool2
    x = pad_hw(x, self.pad2)
    x = F.relu(F.conv2d(x, self.conv2_w, self.conv2_b, stride=int(self.stride2[0])))
    x = F.local_response_norm(x, 2, alpha=0.0001, beta=0.75, k=2.000)
    x = pad_hw(x, self.pad_pool2)
    x = F.max_pool2d(x, kernel_size=(int(self.area_pool2[0]), int(self.area_pool2[1])),
                     stride=(int(self.stride_pool2[0]), int(self.stride_pool2[1])))

    # conv3 - conv5, then pool5
    x = pad_hw(x, self.pad3)
    x = F.relu(F.conv2d(x, self.conv3_w, self.conv3_b, stride=int(self.stride3[0])))
    x = pad_hw(x, self.pad4)
    x = F.relu(F.conv2d(x, self.conv4_w, self.conv4_b, stride=int(self.stride4[0])))
    x = pad_hw(x, self.pad5)
    x = F.relu(F.conv2d(x, self.conv5_w, self.conv5_b, stride=int(self.stride5[0])))
    x = pad_hw(x, self.pad_pool5)
    x = F.max_pool2d(x, kernel_size=(int(self.area_pool5[0]), int(self.area_pool5[1])),
                     stride=(int(self.stride_pool5[0]), int(self.stride_pool5[1])))

    # fc6 and fc7, implemented as convolutions.
    # NOTE(review): self.keep_prob is passed as F.dropout's p, which is the
    # probability of ZEROING a unit, not of keeping it — confirm the intent.
    x = F.relu(F.conv2d(x, self.fc6_w, self.fc6_b, stride=1))
    x = F.dropout(x, p=self.keep_prob, training=self.training)
    x = F.relu(F.conv2d(x, self.fc7_w, self.fc7_b, stride=1))
    x = F.dropout(x, p=self.keep_prob, training=self.training)

    # image feature, then image hash code
    x = F.relu(self.fc8(x))
    hashcode = self.hashlayer(x)
    return hashcode.squeeze()
def forward(self, x):
    """AlexNet forward pass that returns the intermediate tensor named by
    ``self.feature_name`` (e.g. 'conv3', 'norm2', 'pool5', 'fc6').

    For 'fcN' names, the post-linear tensor ('fcN_1') is returned.  The
    selected tensor is also stored on ``self.feature``.

    Cleanups: the fragile eval() on self.feature_name was replaced by an
    explicit dict lookup, and the unused pool indices
    (return_indices=True) were dropped.
    """
    features = {}

    def save(name, value):
        # register a named intermediate so it can be selected by name below
        features[name] = value
        return value

    conv1 = save('conv1', self.conv1(x))
    relu1 = save('relu1', F.relu(conv1))
    pool1 = save('pool1', F.max_pool2d(relu1, kernel_size=(3, 3), stride=(2, 2),
                                       padding=0, ceil_mode=True))
    norm1 = save('norm1', F.local_response_norm(pool1, size=5, alpha=1e-04,
                                                beta=0.75, k=1.0))

    conv2 = save('conv2', self.conv2(F.pad(norm1, (2, 2, 2, 2))))
    relu2 = save('relu2', F.relu(conv2))
    pool2 = save('pool2', F.max_pool2d(relu2, kernel_size=(3, 3), stride=(2, 2),
                                       padding=0, ceil_mode=True))
    norm2 = save('norm2', F.local_response_norm(pool2, size=5, alpha=1e-04,
                                                beta=0.75, k=1.0))

    conv3 = save('conv3', self.conv3(F.pad(norm2, (1, 1, 1, 1))))
    relu3 = save('relu3', F.relu(conv3))
    conv4 = save('conv4', self.conv4(F.pad(relu3, (1, 1, 1, 1))))
    relu4 = save('relu4', F.relu(conv4))
    conv5 = save('conv5', self.conv5(F.pad(relu4, (1, 1, 1, 1))))
    relu5 = save('relu5', F.relu(conv5))
    pool5 = save('pool5', F.max_pool2d(relu5, kernel_size=(3, 3), stride=(2, 2),
                                       padding=0, ceil_mode=False))

    fc6_1 = save('fc6_1', self.fc6_1(pool5.view(pool5.size(0), -1)))
    relu6 = save('relu6', F.relu(fc6_1))
    fc7_1 = save('fc7_1', self.fc7_1(relu6.view(relu6.size(0), -1)))
    relu7 = save('relu7', F.relu(fc7_1))
    save('fc8_1', self.fc8_1(relu7.view(relu7.size(0), -1)))

    # select the requested feature; 'fcN' maps to the 'fcN_1' tensor
    if self.feature_name[:2] == 'fc':
        self.feature = features[self.feature_name + '_1']
    else:
        self.feature = features[self.feature_name]
    return self.feature
def forward(self, x):
    """Apply local response normalization (size=5) to the input.

    Cleanup: the original multiplied the result by 1.0 in place — a
    numeric no-op that has been removed.
    """
    return F.local_response_norm(x, size=5)
def forward(self, x):
    """Inception-v1 (GoogLeNet) style forward pass with a softmax head.

    Machine-generated straight-line graph code: every intermediate tensor
    gets its own named local. Pipeline: two stem conv stages (each with
    asymmetric padding, max-pool and LRN with size=9, beta=0.5) ->
    mixed3a/3b -> maxpool -> mixed4a..4e -> maxpool -> mixed5a/5b ->
    7x7 average pool -> reshape to 1024 features -> linear -> softmax.

    Each mixed* block is a standard Inception module: four parallel
    branches (1x1 conv; 1x1 bottleneck + 3x3 conv; 1x1 bottleneck +
    5x5 conv; 3x3 max-pool + 1x1 projection) whose outputs are joined
    along the channel dimension.

    NOTE(review): the pooling attributes (self.maxpool0, self.mixed3a_pool,
    ...) are called with kernel_size/stride kwargs, and the mixed* "join"
    attributes are called as self.mixedXX((tensors), 1) — presumably these
    are bound to functional handles (F.max_pool2d / torch.cat) in
    __init__; confirm against the class definition.

    Args:
        x: input image batch, NCHW — presumably 3x224x224 so that the
           final feature map is 7x7x1024; TODO confirm.

    Returns:
        Softmax class probabilities from the softmax2 head.
    """
    # --- Stem: conv -> pool -> LRN -> conv -> conv -> LRN -> pool ---
    # Asymmetric (2, 3) padding reproduces the original framework's
    # SAME-padding for a stride-2 7x7 conv.
    conv2d0_pre_relu_conv_pad = F.pad(x, (2, 3, 2, 3))
    conv2d0_pre_relu_conv = self.conv2d0_pre_relu_conv(conv2d0_pre_relu_conv_pad)
    conv2d0 = self.conv2d0(conv2d0_pre_relu_conv)
    # Pools are padded with -inf so the padding never wins the max.
    maxpool0_pad = F.pad(conv2d0, (0, 1, 0, 1), value=float('-inf'))
    maxpool0 = self.maxpool0(maxpool0_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    localresponsenorm0 = F.local_response_norm(maxpool0, size=9, alpha=9.99999974738e-05, beta=0.5, k=1)
    conv2d1_pre_relu_conv = self.conv2d1_pre_relu_conv(localresponsenorm0)
    conv2d1 = self.conv2d1(conv2d1_pre_relu_conv)
    conv2d2_pre_relu_conv_pad = F.pad(conv2d1, (1, 1, 1, 1))
    conv2d2_pre_relu_conv = self.conv2d2_pre_relu_conv(conv2d2_pre_relu_conv_pad)
    conv2d2 = self.conv2d2(conv2d2_pre_relu_conv)
    localresponsenorm1 = F.local_response_norm(conv2d2, size=9, alpha=9.99999974738e-05, beta=0.5, k=1)
    maxpool1_pad = F.pad(localresponsenorm1, (0, 1, 0, 1), value=float('-inf'))
    maxpool1 = self.maxpool1(maxpool1_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- Inception mixed3a ---
    mixed3a_1x1_pre_relu_conv = self.mixed3a_1x1_pre_relu_conv(maxpool1)
    mixed3a_3x3_bottleneck_pre_relu_conv = self.mixed3a_3x3_bottleneck_pre_relu_conv(maxpool1)
    mixed3a_5x5_bottleneck_pre_relu_conv = self.mixed3a_5x5_bottleneck_pre_relu_conv(maxpool1)
    mixed3a_pool_pad = F.pad(maxpool1, (1, 1, 1, 1), value=float('-inf'))
    mixed3a_pool = self.mixed3a_pool(mixed3a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed3a_1x1 = self.mixed3a_1x1(mixed3a_1x1_pre_relu_conv)
    mixed3a_3x3_bottleneck = self.mixed3a_3x3_bottleneck(mixed3a_3x3_bottleneck_pre_relu_conv)
    mixed3a_5x5_bottleneck = self.mixed3a_5x5_bottleneck(mixed3a_5x5_bottleneck_pre_relu_conv)
    mixed3a_pool_reduce_pre_relu_conv = self.mixed3a_pool_reduce_pre_relu_conv(mixed3a_pool)
    mixed3a_3x3_pre_relu_conv_pad = F.pad(mixed3a_3x3_bottleneck, (1, 1, 1, 1))
    mixed3a_3x3_pre_relu_conv = self.mixed3a_3x3_pre_relu_conv(mixed3a_3x3_pre_relu_conv_pad)
    mixed3a_5x5_pre_relu_conv_pad = F.pad(mixed3a_5x5_bottleneck, (2, 2, 2, 2))
    mixed3a_5x5_pre_relu_conv = self.mixed3a_5x5_pre_relu_conv(mixed3a_5x5_pre_relu_conv_pad)
    mixed3a_pool_reduce = self.mixed3a_pool_reduce(mixed3a_pool_reduce_pre_relu_conv)
    mixed3a_3x3 = self.mixed3a_3x3(mixed3a_3x3_pre_relu_conv)
    mixed3a_5x5 = self.mixed3a_5x5(mixed3a_5x5_pre_relu_conv)
    mixed3a = self.mixed3a((mixed3a_1x1, mixed3a_3x3, mixed3a_5x5, mixed3a_pool_reduce), 1)
    # --- Inception mixed3b ---
    mixed3b_1x1_pre_relu_conv = self.mixed3b_1x1_pre_relu_conv(mixed3a)
    mixed3b_3x3_bottleneck_pre_relu_conv = self.mixed3b_3x3_bottleneck_pre_relu_conv(mixed3a)
    mixed3b_5x5_bottleneck_pre_relu_conv = self.mixed3b_5x5_bottleneck_pre_relu_conv(mixed3a)
    mixed3b_pool_pad = F.pad(mixed3a, (1, 1, 1, 1), value=float('-inf'))
    mixed3b_pool = self.mixed3b_pool(mixed3b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed3b_1x1 = self.mixed3b_1x1(mixed3b_1x1_pre_relu_conv)
    mixed3b_3x3_bottleneck = self.mixed3b_3x3_bottleneck(mixed3b_3x3_bottleneck_pre_relu_conv)
    mixed3b_5x5_bottleneck = self.mixed3b_5x5_bottleneck(mixed3b_5x5_bottleneck_pre_relu_conv)
    mixed3b_pool_reduce_pre_relu_conv = self.mixed3b_pool_reduce_pre_relu_conv(mixed3b_pool)
    mixed3b_3x3_pre_relu_conv_pad = F.pad(mixed3b_3x3_bottleneck, (1, 1, 1, 1))
    mixed3b_3x3_pre_relu_conv = self.mixed3b_3x3_pre_relu_conv(mixed3b_3x3_pre_relu_conv_pad)
    mixed3b_5x5_pre_relu_conv_pad = F.pad(mixed3b_5x5_bottleneck, (2, 2, 2, 2))
    mixed3b_5x5_pre_relu_conv = self.mixed3b_5x5_pre_relu_conv(mixed3b_5x5_pre_relu_conv_pad)
    mixed3b_pool_reduce = self.mixed3b_pool_reduce(mixed3b_pool_reduce_pre_relu_conv)
    mixed3b_3x3 = self.mixed3b_3x3(mixed3b_3x3_pre_relu_conv)
    mixed3b_5x5 = self.mixed3b_5x5(mixed3b_5x5_pre_relu_conv)
    mixed3b = self.mixed3b((mixed3b_1x1, mixed3b_3x3, mixed3b_5x5, mixed3b_pool_reduce), 1)
    # Downsample between stage 3 and stage 4.
    maxpool4_pad = F.pad(mixed3b, (0, 1, 0, 1), value=float('-inf'))
    maxpool4 = self.maxpool4(maxpool4_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- Inception mixed4a ---
    mixed4a_1x1_pre_relu_conv = self.mixed4a_1x1_pre_relu_conv(maxpool4)
    mixed4a_3x3_bottleneck_pre_relu_conv = self.mixed4a_3x3_bottleneck_pre_relu_conv(maxpool4)
    mixed4a_5x5_bottleneck_pre_relu_conv = self.mixed4a_5x5_bottleneck_pre_relu_conv(maxpool4)
    mixed4a_pool_pad = F.pad(maxpool4, (1, 1, 1, 1), value=float('-inf'))
    mixed4a_pool = self.mixed4a_pool(mixed4a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed4a_1x1 = self.mixed4a_1x1(mixed4a_1x1_pre_relu_conv)
    mixed4a_3x3_bottleneck = self.mixed4a_3x3_bottleneck(mixed4a_3x3_bottleneck_pre_relu_conv)
    mixed4a_5x5_bottleneck = self.mixed4a_5x5_bottleneck(mixed4a_5x5_bottleneck_pre_relu_conv)
    mixed4a_pool_reduce_pre_relu_conv = self.mixed4a_pool_reduce_pre_relu_conv(mixed4a_pool)
    mixed4a_3x3_pre_relu_conv_pad = F.pad(mixed4a_3x3_bottleneck, (1, 1, 1, 1))
    mixed4a_3x3_pre_relu_conv = self.mixed4a_3x3_pre_relu_conv(mixed4a_3x3_pre_relu_conv_pad)
    mixed4a_5x5_pre_relu_conv_pad = F.pad(mixed4a_5x5_bottleneck, (2, 2, 2, 2))
    mixed4a_5x5_pre_relu_conv = self.mixed4a_5x5_pre_relu_conv(mixed4a_5x5_pre_relu_conv_pad)
    mixed4a_pool_reduce = self.mixed4a_pool_reduce(mixed4a_pool_reduce_pre_relu_conv)
    mixed4a_3x3 = self.mixed4a_3x3(mixed4a_3x3_pre_relu_conv)
    mixed4a_5x5 = self.mixed4a_5x5(mixed4a_5x5_pre_relu_conv)
    mixed4a = self.mixed4a((mixed4a_1x1, mixed4a_3x3, mixed4a_5x5, mixed4a_pool_reduce), 1)
    # --- Inception mixed4b ---
    mixed4b_1x1_pre_relu_conv = self.mixed4b_1x1_pre_relu_conv(mixed4a)
    mixed4b_3x3_bottleneck_pre_relu_conv = self.mixed4b_3x3_bottleneck_pre_relu_conv(mixed4a)
    mixed4b_5x5_bottleneck_pre_relu_conv = self.mixed4b_5x5_bottleneck_pre_relu_conv(mixed4a)
    mixed4b_pool_pad = F.pad(mixed4a, (1, 1, 1, 1), value=float('-inf'))
    mixed4b_pool = self.mixed4b_pool(mixed4b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed4b_1x1 = self.mixed4b_1x1(mixed4b_1x1_pre_relu_conv)
    mixed4b_3x3_bottleneck = self.mixed4b_3x3_bottleneck(mixed4b_3x3_bottleneck_pre_relu_conv)
    mixed4b_5x5_bottleneck = self.mixed4b_5x5_bottleneck(mixed4b_5x5_bottleneck_pre_relu_conv)
    mixed4b_pool_reduce_pre_relu_conv = self.mixed4b_pool_reduce_pre_relu_conv(mixed4b_pool)
    mixed4b_3x3_pre_relu_conv_pad = F.pad(mixed4b_3x3_bottleneck, (1, 1, 1, 1))
    mixed4b_3x3_pre_relu_conv = self.mixed4b_3x3_pre_relu_conv(mixed4b_3x3_pre_relu_conv_pad)
    mixed4b_5x5_pre_relu_conv_pad = F.pad(mixed4b_5x5_bottleneck, (2, 2, 2, 2))
    mixed4b_5x5_pre_relu_conv = self.mixed4b_5x5_pre_relu_conv(mixed4b_5x5_pre_relu_conv_pad)
    mixed4b_pool_reduce = self.mixed4b_pool_reduce(mixed4b_pool_reduce_pre_relu_conv)
    mixed4b_3x3 = self.mixed4b_3x3(mixed4b_3x3_pre_relu_conv)
    mixed4b_5x5 = self.mixed4b_5x5(mixed4b_5x5_pre_relu_conv)
    mixed4b = self.mixed4b((mixed4b_1x1, mixed4b_3x3, mixed4b_5x5, mixed4b_pool_reduce), 1)
    # --- Inception mixed4c ---
    mixed4c_1x1_pre_relu_conv = self.mixed4c_1x1_pre_relu_conv(mixed4b)
    mixed4c_3x3_bottleneck_pre_relu_conv = self.mixed4c_3x3_bottleneck_pre_relu_conv(mixed4b)
    mixed4c_5x5_bottleneck_pre_relu_conv = self.mixed4c_5x5_bottleneck_pre_relu_conv(mixed4b)
    mixed4c_pool_pad = F.pad(mixed4b, (1, 1, 1, 1), value=float('-inf'))
    mixed4c_pool = self.mixed4c_pool(mixed4c_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed4c_1x1 = self.mixed4c_1x1(mixed4c_1x1_pre_relu_conv)
    mixed4c_3x3_bottleneck = self.mixed4c_3x3_bottleneck(mixed4c_3x3_bottleneck_pre_relu_conv)
    mixed4c_5x5_bottleneck = self.mixed4c_5x5_bottleneck(mixed4c_5x5_bottleneck_pre_relu_conv)
    mixed4c_pool_reduce_pre_relu_conv = self.mixed4c_pool_reduce_pre_relu_conv(mixed4c_pool)
    mixed4c_3x3_pre_relu_conv_pad = F.pad(mixed4c_3x3_bottleneck, (1, 1, 1, 1))
    mixed4c_3x3_pre_relu_conv = self.mixed4c_3x3_pre_relu_conv(mixed4c_3x3_pre_relu_conv_pad)
    mixed4c_5x5_pre_relu_conv_pad = F.pad(mixed4c_5x5_bottleneck, (2, 2, 2, 2))
    mixed4c_5x5_pre_relu_conv = self.mixed4c_5x5_pre_relu_conv(mixed4c_5x5_pre_relu_conv_pad)
    mixed4c_pool_reduce = self.mixed4c_pool_reduce(mixed4c_pool_reduce_pre_relu_conv)
    mixed4c_3x3 = self.mixed4c_3x3(mixed4c_3x3_pre_relu_conv)
    mixed4c_5x5 = self.mixed4c_5x5(mixed4c_5x5_pre_relu_conv)
    mixed4c = self.mixed4c((mixed4c_1x1, mixed4c_3x3, mixed4c_5x5, mixed4c_pool_reduce), 1)
    # --- Inception mixed4d ---
    mixed4d_1x1_pre_relu_conv = self.mixed4d_1x1_pre_relu_conv(mixed4c)
    mixed4d_3x3_bottleneck_pre_relu_conv = self.mixed4d_3x3_bottleneck_pre_relu_conv(mixed4c)
    mixed4d_5x5_bottleneck_pre_relu_conv = self.mixed4d_5x5_bottleneck_pre_relu_conv(mixed4c)
    mixed4d_pool_pad = F.pad(mixed4c, (1, 1, 1, 1), value=float('-inf'))
    mixed4d_pool = self.mixed4d_pool(mixed4d_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed4d_1x1 = self.mixed4d_1x1(mixed4d_1x1_pre_relu_conv)
    mixed4d_3x3_bottleneck = self.mixed4d_3x3_bottleneck(mixed4d_3x3_bottleneck_pre_relu_conv)
    mixed4d_5x5_bottleneck = self.mixed4d_5x5_bottleneck(mixed4d_5x5_bottleneck_pre_relu_conv)
    mixed4d_pool_reduce_pre_relu_conv = self.mixed4d_pool_reduce_pre_relu_conv(mixed4d_pool)
    mixed4d_3x3_pre_relu_conv_pad = F.pad(mixed4d_3x3_bottleneck, (1, 1, 1, 1))
    mixed4d_3x3_pre_relu_conv = self.mixed4d_3x3_pre_relu_conv(mixed4d_3x3_pre_relu_conv_pad)
    mixed4d_5x5_pre_relu_conv_pad = F.pad(mixed4d_5x5_bottleneck, (2, 2, 2, 2))
    mixed4d_5x5_pre_relu_conv = self.mixed4d_5x5_pre_relu_conv(mixed4d_5x5_pre_relu_conv_pad)
    mixed4d_pool_reduce = self.mixed4d_pool_reduce(mixed4d_pool_reduce_pre_relu_conv)
    mixed4d_3x3 = self.mixed4d_3x3(mixed4d_3x3_pre_relu_conv)
    mixed4d_5x5 = self.mixed4d_5x5(mixed4d_5x5_pre_relu_conv)
    mixed4d = self.mixed4d((mixed4d_1x1, mixed4d_3x3, mixed4d_5x5, mixed4d_pool_reduce), 1)
    # --- Inception mixed4e ---
    mixed4e_1x1_pre_relu_conv = self.mixed4e_1x1_pre_relu_conv(mixed4d)
    mixed4e_3x3_bottleneck_pre_relu_conv = self.mixed4e_3x3_bottleneck_pre_relu_conv(mixed4d)
    mixed4e_5x5_bottleneck_pre_relu_conv = self.mixed4e_5x5_bottleneck_pre_relu_conv(mixed4d)
    mixed4e_pool_pad = F.pad(mixed4d, (1, 1, 1, 1), value=float('-inf'))
    mixed4e_pool = self.mixed4e_pool(mixed4e_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed4e_1x1 = self.mixed4e_1x1(mixed4e_1x1_pre_relu_conv)
    mixed4e_3x3_bottleneck = self.mixed4e_3x3_bottleneck(mixed4e_3x3_bottleneck_pre_relu_conv)
    mixed4e_5x5_bottleneck = self.mixed4e_5x5_bottleneck(mixed4e_5x5_bottleneck_pre_relu_conv)
    mixed4e_pool_reduce_pre_relu_conv = self.mixed4e_pool_reduce_pre_relu_conv(mixed4e_pool)
    mixed4e_3x3_pre_relu_conv_pad = F.pad(mixed4e_3x3_bottleneck, (1, 1, 1, 1))
    mixed4e_3x3_pre_relu_conv = self.mixed4e_3x3_pre_relu_conv(mixed4e_3x3_pre_relu_conv_pad)
    mixed4e_5x5_pre_relu_conv_pad = F.pad(mixed4e_5x5_bottleneck, (2, 2, 2, 2))
    mixed4e_5x5_pre_relu_conv = self.mixed4e_5x5_pre_relu_conv(mixed4e_5x5_pre_relu_conv_pad)
    mixed4e_pool_reduce = self.mixed4e_pool_reduce(mixed4e_pool_reduce_pre_relu_conv)
    mixed4e_3x3 = self.mixed4e_3x3(mixed4e_3x3_pre_relu_conv)
    mixed4e_5x5 = self.mixed4e_5x5(mixed4e_5x5_pre_relu_conv)
    mixed4e = self.mixed4e((mixed4e_1x1, mixed4e_3x3, mixed4e_5x5, mixed4e_pool_reduce), 1)
    # Downsample between stage 4 and stage 5.
    maxpool10_pad = F.pad(mixed4e, (0, 1, 0, 1), value=float('-inf'))
    maxpool10 = self.maxpool10(maxpool10_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- Inception mixed5a ---
    mixed5a_1x1_pre_relu_conv = self.mixed5a_1x1_pre_relu_conv(maxpool10)
    mixed5a_3x3_bottleneck_pre_relu_conv = self.mixed5a_3x3_bottleneck_pre_relu_conv(maxpool10)
    mixed5a_5x5_bottleneck_pre_relu_conv = self.mixed5a_5x5_bottleneck_pre_relu_conv(maxpool10)
    mixed5a_pool_pad = F.pad(maxpool10, (1, 1, 1, 1), value=float('-inf'))
    mixed5a_pool = self.mixed5a_pool(mixed5a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed5a_1x1 = self.mixed5a_1x1(mixed5a_1x1_pre_relu_conv)
    mixed5a_3x3_bottleneck = self.mixed5a_3x3_bottleneck(mixed5a_3x3_bottleneck_pre_relu_conv)
    mixed5a_5x5_bottleneck = self.mixed5a_5x5_bottleneck(mixed5a_5x5_bottleneck_pre_relu_conv)
    mixed5a_pool_reduce_pre_relu_conv = self.mixed5a_pool_reduce_pre_relu_conv(mixed5a_pool)
    mixed5a_3x3_pre_relu_conv_pad = F.pad(mixed5a_3x3_bottleneck, (1, 1, 1, 1))
    mixed5a_3x3_pre_relu_conv = self.mixed5a_3x3_pre_relu_conv(mixed5a_3x3_pre_relu_conv_pad)
    mixed5a_5x5_pre_relu_conv_pad = F.pad(mixed5a_5x5_bottleneck, (2, 2, 2, 2))
    mixed5a_5x5_pre_relu_conv = self.mixed5a_5x5_pre_relu_conv(mixed5a_5x5_pre_relu_conv_pad)
    mixed5a_pool_reduce = self.mixed5a_pool_reduce(mixed5a_pool_reduce_pre_relu_conv)
    mixed5a_3x3 = self.mixed5a_3x3(mixed5a_3x3_pre_relu_conv)
    mixed5a_5x5 = self.mixed5a_5x5(mixed5a_5x5_pre_relu_conv)
    mixed5a = self.mixed5a((mixed5a_1x1, mixed5a_3x3, mixed5a_5x5, mixed5a_pool_reduce), 1)
    # --- Inception mixed5b ---
    mixed5b_1x1_pre_relu_conv = self.mixed5b_1x1_pre_relu_conv(mixed5a)
    mixed5b_3x3_bottleneck_pre_relu_conv = self.mixed5b_3x3_bottleneck_pre_relu_conv(mixed5a)
    mixed5b_5x5_bottleneck_pre_relu_conv = self.mixed5b_5x5_bottleneck_pre_relu_conv(mixed5a)
    mixed5b_pool_pad = F.pad(mixed5a, (1, 1, 1, 1), value=float('-inf'))
    mixed5b_pool = self.mixed5b_pool(mixed5b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    mixed5b_1x1 = self.mixed5b_1x1(mixed5b_1x1_pre_relu_conv)
    mixed5b_3x3_bottleneck = self.mixed5b_3x3_bottleneck(mixed5b_3x3_bottleneck_pre_relu_conv)
    mixed5b_5x5_bottleneck = self.mixed5b_5x5_bottleneck(mixed5b_5x5_bottleneck_pre_relu_conv)
    mixed5b_pool_reduce_pre_relu_conv = self.mixed5b_pool_reduce_pre_relu_conv(mixed5b_pool)
    mixed5b_3x3_pre_relu_conv_pad = F.pad(mixed5b_3x3_bottleneck, (1, 1, 1, 1))
    mixed5b_3x3_pre_relu_conv = self.mixed5b_3x3_pre_relu_conv(mixed5b_3x3_pre_relu_conv_pad)
    mixed5b_5x5_pre_relu_conv_pad = F.pad(mixed5b_5x5_bottleneck, (2, 2, 2, 2))
    mixed5b_5x5_pre_relu_conv = self.mixed5b_5x5_pre_relu_conv(mixed5b_5x5_pre_relu_conv_pad)
    mixed5b_pool_reduce = self.mixed5b_pool_reduce(mixed5b_pool_reduce_pre_relu_conv)
    mixed5b_3x3 = self.mixed5b_3x3(mixed5b_3x3_pre_relu_conv)
    mixed5b_5x5 = self.mixed5b_5x5(mixed5b_5x5_pre_relu_conv)
    mixed5b = self.mixed5b((mixed5b_1x1, mixed5b_3x3, mixed5b_5x5, mixed5b_pool_reduce), 1)
    # --- Head: global 7x7 average pool -> flatten to 1024 -> linear -> softmax ---
    avgpool0 = F.avg_pool2d(mixed5b, kernel_size=(7, 7), stride=(1, 1), padding=(0,), ceil_mode=False, count_include_pad=False)
    avgpool0_reshape = torch.reshape(input=avgpool0, shape=(-1, 1024))
    softmax2_pre_activation_matmul = self.softmax2_pre_activation_matmul(avgpool0_reshape)
    softmax2 = self.softmax2(softmax2_pre_activation_matmul)
    return softmax2
def forward(self, x):
    """GoogLeNet (Inception v1) feature-extractor forward pass.

    Machine-generated straight-line graph code (Caffe layer names
    preserved). Pipeline: 7x7/2 stem conv -> pool + LRN -> 1x1 reduce +
    3x3 conv -> LRN + pool -> inception_3a/3b -> pool3 -> inception_4a..4e
    -> pool4 -> inception_5a/5b -> 7x7 average pool -> dropout.
    There is no classifier here: the pooled, dropped feature map is
    returned directly.

    Each inception_* block has four parallel branches (1x1; 1x1 reduce +
    3x3; 1x1 reduce + 5x5; 3x3 max-pool + 1x1 projection), each followed
    by its relu module, joined along the channel dimension.

    NOTE(review): the pool attributes (self.pool1_3x3_s2, ...) are called
    with kernel_size/stride kwargs, the *_output attributes are called as
    self.xxx_output((tensors), 1), and the dropout attribute is called
    with p/training/inplace kwargs — presumably all are bound to
    functional handles (F.max_pool2d / torch.cat / F.dropout) in
    __init__; confirm against the class definition.

    Args:
        x: input image batch, NCHW — presumably 3x224x224 so the final
           feature map is 7x7; TODO confirm.

    Returns:
        Dropout(p≈0.4) of the 7x7-average-pooled inception_5b output.
    """
    # --- Stem ---
    conv1_7x7_s2_pad = F.pad(x, (3, 3, 3, 3))
    conv1_7x7_s2 = self.conv1_7x7_s2(conv1_7x7_s2_pad)
    conv1_relu_7x7 = self.conv1_relu_7x7(conv1_7x7_s2)
    # Pools are padded with -inf so padding never wins the max.
    pool1_3x3_s2_pad = F.pad(conv1_relu_7x7, (0, 1, 0, 1), value=float('-inf'))
    pool1_3x3_s2 = self.pool1_3x3_s2(pool1_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    pool1_norm1 = F.local_response_norm(pool1_3x3_s2, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    conv2_3x3_reduce = self.conv2_3x3_reduce(pool1_norm1)
    conv2_relu_3x3_reduce = self.conv2_relu_3x3_reduce(conv2_3x3_reduce)
    conv2_3x3_pad = F.pad(conv2_relu_3x3_reduce, (1, 1, 1, 1))
    conv2_3x3 = self.conv2_3x3(conv2_3x3_pad)
    conv2_relu_3x3 = self.conv2_relu_3x3(conv2_3x3)
    conv2_norm2 = F.local_response_norm(conv2_relu_3x3, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    pool2_3x3_s2_pad = F.pad(conv2_norm2, (0, 1, 0, 1), value=float('-inf'))
    pool2_3x3_s2 = self.pool2_3x3_s2(pool2_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- inception_3a ---
    inception_3a_pool_pad = F.pad(pool2_3x3_s2, (1, 1, 1, 1), value=float('-inf'))
    inception_3a_pool = self.inception_3a_pool(inception_3a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_3a_1x1 = self.inception_3a_1x1(pool2_3x3_s2)
    inception_3a_5x5_reduce = self.inception_3a_5x5_reduce(pool2_3x3_s2)
    inception_3a_3x3_reduce = self.inception_3a_3x3_reduce(pool2_3x3_s2)
    inception_3a_pool_proj = self.inception_3a_pool_proj(inception_3a_pool)
    inception_3a_relu_1x1 = self.inception_3a_relu_1x1(inception_3a_1x1)
    inception_3a_relu_5x5_reduce = self.inception_3a_relu_5x5_reduce(
        inception_3a_5x5_reduce)
    inception_3a_relu_3x3_reduce = self.inception_3a_relu_3x3_reduce(
        inception_3a_3x3_reduce)
    inception_3a_relu_pool_proj = self.inception_3a_relu_pool_proj(
        inception_3a_pool_proj)
    inception_3a_5x5_pad = F.pad(inception_3a_relu_5x5_reduce, (2, 2, 2, 2))
    inception_3a_5x5 = self.inception_3a_5x5(inception_3a_5x5_pad)
    inception_3a_3x3_pad = F.pad(inception_3a_relu_3x3_reduce, (1, 1, 1, 1))
    inception_3a_3x3 = self.inception_3a_3x3(inception_3a_3x3_pad)
    inception_3a_relu_5x5 = self.inception_3a_relu_5x5(inception_3a_5x5)
    inception_3a_relu_3x3 = self.inception_3a_relu_3x3(inception_3a_3x3)
    inception_3a_output = self.inception_3a_output(
        (inception_3a_relu_1x1, inception_3a_relu_3x3,
         inception_3a_relu_5x5, inception_3a_relu_pool_proj), 1)
    # --- inception_3b ---
    inception_3b_3x3_reduce = self.inception_3b_3x3_reduce(
        inception_3a_output)
    inception_3b_pool_pad = F.pad(inception_3a_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_3b_pool = self.inception_3b_pool(inception_3b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_3b_1x1 = self.inception_3b_1x1(inception_3a_output)
    inception_3b_5x5_reduce = self.inception_3b_5x5_reduce(
        inception_3a_output)
    inception_3b_relu_3x3_reduce = self.inception_3b_relu_3x3_reduce(
        inception_3b_3x3_reduce)
    inception_3b_pool_proj = self.inception_3b_pool_proj(inception_3b_pool)
    inception_3b_relu_1x1 = self.inception_3b_relu_1x1(inception_3b_1x1)
    inception_3b_relu_5x5_reduce = self.inception_3b_relu_5x5_reduce(
        inception_3b_5x5_reduce)
    inception_3b_3x3_pad = F.pad(inception_3b_relu_3x3_reduce, (1, 1, 1, 1))
    inception_3b_3x3 = self.inception_3b_3x3(inception_3b_3x3_pad)
    inception_3b_relu_pool_proj = self.inception_3b_relu_pool_proj(
        inception_3b_pool_proj)
    inception_3b_5x5_pad = F.pad(inception_3b_relu_5x5_reduce, (2, 2, 2, 2))
    inception_3b_5x5 = self.inception_3b_5x5(inception_3b_5x5_pad)
    inception_3b_relu_3x3 = self.inception_3b_relu_3x3(inception_3b_3x3)
    inception_3b_relu_5x5 = self.inception_3b_relu_5x5(inception_3b_5x5)
    inception_3b_output = self.inception_3b_output(
        (inception_3b_relu_1x1, inception_3b_relu_3x3,
         inception_3b_relu_5x5, inception_3b_relu_pool_proj), 1)
    # Downsample between stage 3 and stage 4.
    pool3_3x3_s2_pad = F.pad(inception_3b_output, (0, 1, 0, 1),
                             value=float('-inf'))
    pool3_3x3_s2 = self.pool3_3x3_s2(pool3_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- inception_4a ---
    inception_4a_1x1 = self.inception_4a_1x1(pool3_3x3_s2)
    inception_4a_3x3_reduce = self.inception_4a_3x3_reduce(pool3_3x3_s2)
    inception_4a_5x5_reduce = self.inception_4a_5x5_reduce(pool3_3x3_s2)
    inception_4a_pool_pad = F.pad(pool3_3x3_s2, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_4a_pool = self.inception_4a_pool(inception_4a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4a_relu_1x1 = self.inception_4a_relu_1x1(inception_4a_1x1)
    inception_4a_relu_3x3_reduce = self.inception_4a_relu_3x3_reduce(
        inception_4a_3x3_reduce)
    inception_4a_relu_5x5_reduce = self.inception_4a_relu_5x5_reduce(
        inception_4a_5x5_reduce)
    inception_4a_pool_proj = self.inception_4a_pool_proj(inception_4a_pool)
    inception_4a_3x3_pad = F.pad(inception_4a_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4a_3x3 = self.inception_4a_3x3(inception_4a_3x3_pad)
    inception_4a_5x5_pad = F.pad(inception_4a_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4a_5x5 = self.inception_4a_5x5(inception_4a_5x5_pad)
    inception_4a_relu_pool_proj = self.inception_4a_relu_pool_proj(
        inception_4a_pool_proj)
    inception_4a_relu_3x3 = self.inception_4a_relu_3x3(inception_4a_3x3)
    inception_4a_relu_5x5 = self.inception_4a_relu_5x5(inception_4a_5x5)
    inception_4a_output = self.inception_4a_output(
        (inception_4a_relu_1x1, inception_4a_relu_3x3,
         inception_4a_relu_5x5, inception_4a_relu_pool_proj), 1)
    # --- inception_4b ---
    inception_4b_pool_pad = F.pad(inception_4a_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_4b_pool = self.inception_4b_pool(inception_4b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4b_5x5_reduce = self.inception_4b_5x5_reduce(
        inception_4a_output)
    inception_4b_1x1 = self.inception_4b_1x1(inception_4a_output)
    inception_4b_3x3_reduce = self.inception_4b_3x3_reduce(
        inception_4a_output)
    inception_4b_pool_proj = self.inception_4b_pool_proj(inception_4b_pool)
    inception_4b_relu_5x5_reduce = self.inception_4b_relu_5x5_reduce(
        inception_4b_5x5_reduce)
    inception_4b_relu_1x1 = self.inception_4b_relu_1x1(inception_4b_1x1)
    inception_4b_relu_3x3_reduce = self.inception_4b_relu_3x3_reduce(
        inception_4b_3x3_reduce)
    inception_4b_relu_pool_proj = self.inception_4b_relu_pool_proj(
        inception_4b_pool_proj)
    inception_4b_5x5_pad = F.pad(inception_4b_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4b_5x5 = self.inception_4b_5x5(inception_4b_5x5_pad)
    inception_4b_3x3_pad = F.pad(inception_4b_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4b_3x3 = self.inception_4b_3x3(inception_4b_3x3_pad)
    inception_4b_relu_5x5 = self.inception_4b_relu_5x5(inception_4b_5x5)
    inception_4b_relu_3x3 = self.inception_4b_relu_3x3(inception_4b_3x3)
    inception_4b_output = self.inception_4b_output(
        (inception_4b_relu_1x1, inception_4b_relu_3x3,
         inception_4b_relu_5x5, inception_4b_relu_pool_proj), 1)
    # --- inception_4c ---
    inception_4c_5x5_reduce = self.inception_4c_5x5_reduce(
        inception_4b_output)
    inception_4c_pool_pad = F.pad(inception_4b_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_4c_pool = self.inception_4c_pool(inception_4c_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4c_1x1 = self.inception_4c_1x1(inception_4b_output)
    inception_4c_3x3_reduce = self.inception_4c_3x3_reduce(
        inception_4b_output)
    inception_4c_relu_5x5_reduce = self.inception_4c_relu_5x5_reduce(
        inception_4c_5x5_reduce)
    inception_4c_pool_proj = self.inception_4c_pool_proj(inception_4c_pool)
    inception_4c_relu_1x1 = self.inception_4c_relu_1x1(inception_4c_1x1)
    inception_4c_relu_3x3_reduce = self.inception_4c_relu_3x3_reduce(
        inception_4c_3x3_reduce)
    inception_4c_5x5_pad = F.pad(inception_4c_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4c_5x5 = self.inception_4c_5x5(inception_4c_5x5_pad)
    inception_4c_relu_pool_proj = self.inception_4c_relu_pool_proj(
        inception_4c_pool_proj)
    inception_4c_3x3_pad = F.pad(inception_4c_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4c_3x3 = self.inception_4c_3x3(inception_4c_3x3_pad)
    inception_4c_relu_5x5 = self.inception_4c_relu_5x5(inception_4c_5x5)
    inception_4c_relu_3x3 = self.inception_4c_relu_3x3(inception_4c_3x3)
    inception_4c_output = self.inception_4c_output(
        (inception_4c_relu_1x1, inception_4c_relu_3x3,
         inception_4c_relu_5x5, inception_4c_relu_pool_proj), 1)
    # --- inception_4d ---
    inception_4d_pool_pad = F.pad(inception_4c_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_4d_pool = self.inception_4d_pool(inception_4d_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4d_3x3_reduce = self.inception_4d_3x3_reduce(
        inception_4c_output)
    inception_4d_1x1 = self.inception_4d_1x1(inception_4c_output)
    inception_4d_5x5_reduce = self.inception_4d_5x5_reduce(
        inception_4c_output)
    inception_4d_pool_proj = self.inception_4d_pool_proj(inception_4d_pool)
    inception_4d_relu_3x3_reduce = self.inception_4d_relu_3x3_reduce(
        inception_4d_3x3_reduce)
    inception_4d_relu_1x1 = self.inception_4d_relu_1x1(inception_4d_1x1)
    inception_4d_relu_5x5_reduce = self.inception_4d_relu_5x5_reduce(
        inception_4d_5x5_reduce)
    inception_4d_relu_pool_proj = self.inception_4d_relu_pool_proj(
        inception_4d_pool_proj)
    inception_4d_3x3_pad = F.pad(inception_4d_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4d_3x3 = self.inception_4d_3x3(inception_4d_3x3_pad)
    inception_4d_5x5_pad = F.pad(inception_4d_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4d_5x5 = self.inception_4d_5x5(inception_4d_5x5_pad)
    inception_4d_relu_3x3 = self.inception_4d_relu_3x3(inception_4d_3x3)
    inception_4d_relu_5x5 = self.inception_4d_relu_5x5(inception_4d_5x5)
    inception_4d_output = self.inception_4d_output(
        (inception_4d_relu_1x1, inception_4d_relu_3x3,
         inception_4d_relu_5x5, inception_4d_relu_pool_proj), 1)
    # --- inception_4e ---
    inception_4e_5x5_reduce = self.inception_4e_5x5_reduce(
        inception_4d_output)
    inception_4e_1x1 = self.inception_4e_1x1(inception_4d_output)
    inception_4e_3x3_reduce = self.inception_4e_3x3_reduce(
        inception_4d_output)
    inception_4e_pool_pad = F.pad(inception_4d_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_4e_pool = self.inception_4e_pool(inception_4e_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4e_relu_5x5_reduce = self.inception_4e_relu_5x5_reduce(
        inception_4e_5x5_reduce)
    inception_4e_relu_1x1 = self.inception_4e_relu_1x1(inception_4e_1x1)
    inception_4e_relu_3x3_reduce = self.inception_4e_relu_3x3_reduce(
        inception_4e_3x3_reduce)
    inception_4e_pool_proj = self.inception_4e_pool_proj(inception_4e_pool)
    inception_4e_5x5_pad = F.pad(inception_4e_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4e_5x5 = self.inception_4e_5x5(inception_4e_5x5_pad)
    inception_4e_3x3_pad = F.pad(inception_4e_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4e_3x3 = self.inception_4e_3x3(inception_4e_3x3_pad)
    inception_4e_relu_pool_proj = self.inception_4e_relu_pool_proj(
        inception_4e_pool_proj)
    inception_4e_relu_5x5 = self.inception_4e_relu_5x5(inception_4e_5x5)
    inception_4e_relu_3x3 = self.inception_4e_relu_3x3(inception_4e_3x3)
    inception_4e_output = self.inception_4e_output(
        (inception_4e_relu_1x1, inception_4e_relu_3x3,
         inception_4e_relu_5x5, inception_4e_relu_pool_proj), 1)
    # Downsample between stage 4 and stage 5.
    pool4_3x3_s2_pad = F.pad(inception_4e_output, (0, 1, 0, 1),
                             value=float('-inf'))
    pool4_3x3_s2 = self.pool4_3x3_s2(pool4_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- inception_5a ---
    inception_5a_1x1 = self.inception_5a_1x1(pool4_3x3_s2)
    inception_5a_5x5_reduce = self.inception_5a_5x5_reduce(pool4_3x3_s2)
    inception_5a_pool_pad = F.pad(pool4_3x3_s2, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_5a_pool = self.inception_5a_pool(inception_5a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_5a_3x3_reduce = self.inception_5a_3x3_reduce(pool4_3x3_s2)
    inception_5a_relu_1x1 = self.inception_5a_relu_1x1(inception_5a_1x1)
    inception_5a_relu_5x5_reduce = self.inception_5a_relu_5x5_reduce(
        inception_5a_5x5_reduce)
    inception_5a_pool_proj = self.inception_5a_pool_proj(inception_5a_pool)
    inception_5a_relu_3x3_reduce = self.inception_5a_relu_3x3_reduce(
        inception_5a_3x3_reduce)
    inception_5a_5x5_pad = F.pad(inception_5a_relu_5x5_reduce, (2, 2, 2, 2))
    inception_5a_5x5 = self.inception_5a_5x5(inception_5a_5x5_pad)
    inception_5a_relu_pool_proj = self.inception_5a_relu_pool_proj(
        inception_5a_pool_proj)
    inception_5a_3x3_pad = F.pad(inception_5a_relu_3x3_reduce, (1, 1, 1, 1))
    inception_5a_3x3 = self.inception_5a_3x3(inception_5a_3x3_pad)
    inception_5a_relu_5x5 = self.inception_5a_relu_5x5(inception_5a_5x5)
    inception_5a_relu_3x3 = self.inception_5a_relu_3x3(inception_5a_3x3)
    inception_5a_output = self.inception_5a_output(
        (inception_5a_relu_1x1, inception_5a_relu_3x3,
         inception_5a_relu_5x5, inception_5a_relu_pool_proj), 1)
    # --- inception_5b ---
    inception_5b_pool_pad = F.pad(inception_5a_output, (1, 1, 1, 1),
                                  value=float('-inf'))
    inception_5b_pool = self.inception_5b_pool(inception_5b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_5b_3x3_reduce = self.inception_5b_3x3_reduce(
        inception_5a_output)
    inception_5b_5x5_reduce = self.inception_5b_5x5_reduce(
        inception_5a_output)
    inception_5b_1x1 = self.inception_5b_1x1(inception_5a_output)
    inception_5b_pool_proj = self.inception_5b_pool_proj(inception_5b_pool)
    inception_5b_relu_3x3_reduce = self.inception_5b_relu_3x3_reduce(
        inception_5b_3x3_reduce)
    inception_5b_relu_5x5_reduce = self.inception_5b_relu_5x5_reduce(
        inception_5b_5x5_reduce)
    inception_5b_relu_1x1 = self.inception_5b_relu_1x1(inception_5b_1x1)
    inception_5b_relu_pool_proj = self.inception_5b_relu_pool_proj(
        inception_5b_pool_proj)
    inception_5b_3x3_pad = F.pad(inception_5b_relu_3x3_reduce, (1, 1, 1, 1))
    inception_5b_3x3 = self.inception_5b_3x3(inception_5b_3x3_pad)
    inception_5b_5x5_pad = F.pad(inception_5b_relu_5x5_reduce, (2, 2, 2, 2))
    inception_5b_5x5 = self.inception_5b_5x5(inception_5b_5x5_pad)
    inception_5b_relu_3x3 = self.inception_5b_relu_3x3(inception_5b_3x3)
    inception_5b_relu_5x5 = self.inception_5b_relu_5x5(inception_5b_5x5)
    inception_5b_output = self.inception_5b_output(
        (inception_5b_relu_1x1, inception_5b_relu_3x3,
         inception_5b_relu_5x5, inception_5b_relu_pool_proj), 1)
    # --- Head: 7x7 average pool then dropout; no classifier. ---
    pool5_7x7_s1 = F.avg_pool2d(inception_5b_output, kernel_size=(7, 7), stride=(1, 1), padding=(0, ), ceil_mode=False, count_include_pad=False)
    pool5_drop_7x7_s1 = self.pool5_drop_7x7_s1(input=pool5_7x7_s1, p=0.4000000059604645, training=self.training, inplace=True)
    return pool5_drop_7x7_s1
def forward(self, tensor, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0):
    """Functional LRN wrapper with Caffe-style default hyper-parameters.

    Args:
        tensor: input to normalize across channels.
        size: number of neighboring channels in the normalization window.
        alpha: multiplicative scaling factor.
        beta: exponent.
        k: additive constant.

    Returns:
        The local-response-normalized tensor.
    """
    normalized = F.local_response_norm(tensor, size, alpha, beta, k)
    return normalized
def forward(self, input):
    """Apply local response normalization using this module's stored
    hyper-parameters (``self.size``, ``self.alpha``, ``self.beta``,
    ``self.k``).

    Args:
        input: tensor to normalize across channels.

    Returns:
        The local-response-normalized tensor.
    """
    size, alpha, beta, k = self.size, self.alpha, self.beta, self.k
    return F.local_response_norm(input, size, alpha=alpha, beta=beta, k=k)
def forward(self, x):
    """Apply local response normalization with the hyper-parameters held
    on this module.

    Args:
        x: tensor to normalize across channels.

    Returns:
        The local-response-normalized tensor.
    """
    return F.local_response_norm(
        x,
        size=self.size,
        alpha=self.alpha,
        beta=self.beta,
        k=self.k,
    )
def test_local_response_norm(self):
    """Smoke-test F.local_response_norm on a CUDA tensor of the harness dtype."""
    # Random NCHW batch on the GPU; dtype comes from the test harness.
    inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
    # NOTE(review): `output` is never asserted against anything — this
    # only verifies the op runs without raising; consider asserting
    # output.shape == inp.shape (and finite values) to make it a real test.
    output = F.local_response_norm(inp, 2, alpha=0.0001, beta=0.75, k=1.0)
def forward(self, x):
    """GoogLeNet (Inception v1) feature extractor, machine-converted style.

    Runs the stem (conv1/conv2 + LRN + max-pools), the inception blocks
    3a-3b, 4a-4e and 5a-5b, then a 7x7 average pool and dropout, returning
    the dropped pooled features (no final classifier in this block).

    Conventions of this converted code:
    - Padding is applied explicitly with ``F.pad`` before each conv/pool;
      pool pads use ``value=float('-inf')`` so max-pooling ignores the pad.
    - ``self.poolN`` / ``self.inception_*_pool`` are called with
      ``kernel_size``/``stride`` arguments, and ``self.pool5_drop`` with
      ``p``/``training``/``inplace`` — presumably these attributes are bound
      to the functional ops (``F.max_pool2d`` / ``F.dropout``) in
      ``__init__``; NOTE(review): confirm, plain ``nn`` modules would
      reject these call signatures.
    - ``self.inception_*_output`` is called as ``(tensors_tuple, 1)`` —
      presumably bound to ``torch.cat`` (channel concat); verify in
      ``__init__``.
    - Commented-out ``loss1_*`` / ``loss2_*`` lines are the two auxiliary
      classifiers of the original GoogLeNet, disabled in this variant.
    - ``alpha=9.999999747378752e-05`` and ``p=0.4000000059604645`` are the
      float32 roundings of 1e-4 and 0.4 from the source framework.

    Args:
        x: input image batch; presumably (N, 3, 224, 224) — TODO confirm,
           the fixed 7x7 average pool at the end implies 224x224 inputs.

    Returns:
        Dropout-regularized pooled feature tensor from inception_5b.
    """
    # --- Stem: conv1 -> pool1 -> LRN -> conv2 (1x1 then 3x3) -> LRN -> pool2
    conv1_pad = F.pad(x, (3, 3, 3, 3))
    conv1 = self.conv1(conv1_pad)
    relu1 = self.relu1(conv1)
    # Asymmetric (0,1,0,1) pad emulates the source framework's ceil-mode pooling.
    pool1_pad = F.pad(relu1, (0, 1, 0, 1), value=float('-inf'))
    pool1 = self.pool1(pool1_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    norm1 = F.local_response_norm(pool1, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    conv2_1x1 = self.conv2_1x1(norm1)
    relu_conv2_1x1 = self.relu_conv2_1x1(conv2_1x1)
    conv2_3x3_pad = F.pad(relu_conv2_1x1, (1, 1, 1, 1))
    conv2_3x3 = self.conv2_3x3(conv2_3x3_pad)
    relu2_3x3 = self.relu2_3x3(conv2_3x3)
    norm2 = F.local_response_norm(relu2_3x3, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
    pool2_pad = F.pad(norm2, (0, 1, 0, 1), value=float('-inf'))
    pool2 = self.pool2(pool2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- Inception 3a: four parallel branches (1x1, 3x3-reduce+3x3, 5x5-reduce+5x5, pool+proj)
    inception_3a_5x5_reduce = self.inception_3a_5x5_reduce(pool2)
    inception_3a_3x3_reduce = self.inception_3a_3x3_reduce(pool2)
    inception_3a_1x1 = self.inception_3a_1x1(pool2)
    inception_3a_pool_pad = F.pad(pool2, (1, 1, 1, 1), value=float('-inf'))
    inception_3a_pool = self.inception_3a_pool(inception_3a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    relu_inception_3a_5x5_reduce = self.relu_inception_3a_5x5_reduce(
        inception_3a_5x5_reduce)
    # NOTE(review): "reulu" typo is the actual attribute name; it must match __init__.
    reulu_inception_3a_3x3_reduce = self.reulu_inception_3a_3x3_reduce(
        inception_3a_3x3_reduce)
    relu_inception_3a_1x1 = self.relu_inception_3a_1x1(inception_3a_1x1)
    inception_3a_pool_proj = self.inception_3a_pool_proj(inception_3a_pool)
    inception_3a_5x5_pad = F.pad(relu_inception_3a_5x5_reduce, (2, 2, 2, 2))
    inception_3a_5x5 = self.inception_3a_5x5(inception_3a_5x5_pad)
    inception_3a_3x3_pad = F.pad(reulu_inception_3a_3x3_reduce, (1, 1, 1, 1))
    inception_3a_3x3 = self.inception_3a_3x3(inception_3a_3x3_pad)
    relu_inception_3a_pool_proj = self.relu_inception_3a_pool_proj(
        inception_3a_pool_proj)
    relu_inception_3a_5x5 = self.relu_inception_3a_5x5(inception_3a_5x5)
    relu_inception_3a_3x3 = self.relu_inception_3a_3x3(inception_3a_3x3)
    inception_3a_output = self.inception_3a_output(
        (relu_inception_3a_1x1, relu_inception_3a_3x3,
         relu_inception_3a_5x5, relu_inception_3a_pool_proj), 1)
    # --- Inception 3b
    inception_3b_1x1 = self.inception_3b_1x1(inception_3a_output)
    inception_3b_3x3_reduce = self.inception_3b_3x3_reduce(
        inception_3a_output)
    inception_3b_5x5_reduce = self.inception_3b_5x5_reduce(
        inception_3a_output)
    inception_3b_pool_pad = F.pad(inception_3a_output, (1, 1, 1, 1), value=float('-inf'))
    inception_3b_pool = self.inception_3b_pool(inception_3b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    relu_inception_3b_1x1 = self.relu_inception_3b_1x1(inception_3b_1x1)
    relu_inception_3b_3x3_reduce = self.relu_inception_3b_3x3_reduce(
        inception_3b_3x3_reduce)
    relu_inception_3b_5x5_reduce = self.relu_inception_3b_5x5_reduce(
        inception_3b_5x5_reduce)
    inception_3b_pool_proj = self.inception_3b_pool_proj(inception_3b_pool)
    inception_3b_3x3_pad = F.pad(relu_inception_3b_3x3_reduce, (1, 1, 1, 1))
    inception_3b_3x3 = self.inception_3b_3x3(inception_3b_3x3_pad)
    inception_3b_5x5_pad = F.pad(relu_inception_3b_5x5_reduce, (2, 2, 2, 2))
    inception_3b_5x5 = self.inception_3b_5x5(inception_3b_5x5_pad)
    relu_inception_3b_pool_proj = self.relu_inception_3b_pool_proj(
        inception_3b_pool_proj)
    relu_inception_3b_3x3 = self.relu_inception_3b_3x3(inception_3b_3x3)
    relu_inception_3b_5x5 = self.relu_inception_3b_5x5(inception_3b_5x5)
    inception_3b_output = self.inception_3b_output(
        (relu_inception_3b_1x1, relu_inception_3b_3x3,
         relu_inception_3b_5x5, relu_inception_3b_pool_proj), 1)
    # --- pool3 then Inception 4a
    pool3_pad = F.pad(inception_3b_output, (0, 1, 0, 1), value=float('-inf'))
    pool3 = self.pool3(pool3_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    inception_4a_pool_pad = F.pad(pool3, (1, 1, 1, 1), value=float('-inf'))
    inception_4a_pool = self.inception_4a_pool(inception_4a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4a_3x3_reduce = self.inception_4a_3x3_reduce(pool3)
    inception_4a_5x5_reduce = self.inception_4a_5x5_reduce(pool3)
    inception_4a_1x1 = self.inception_4a_1x1(pool3)
    inception_4a_pool_proj = self.inception_4a_pool_proj(inception_4a_pool)
    relu_inception_4a_3x3_reduce = self.relu_inception_4a_3x3_reduce(
        inception_4a_3x3_reduce)
    relu_inception_4a_5x5_reduce = self.relu_inception_4a_5x5_reduce(
        inception_4a_5x5_reduce)
    relu_inception_4a_1x1 = self.relu_inception_4a_1x1(inception_4a_1x1)
    relu_inception_4a_pool_proj = self.relu_inception_4a_pool_proj(
        inception_4a_pool_proj)
    inception_4a_3x3_pad = F.pad(relu_inception_4a_3x3_reduce, (1, 1, 1, 1))
    inception_4a_3x3 = self.inception_4a_3x3(inception_4a_3x3_pad)
    inception_4a_5x5_pad = F.pad(relu_inception_4a_5x5_reduce, (2, 2, 2, 2))
    inception_4a_5x5 = self.inception_4a_5x5(inception_4a_5x5_pad)
    relu_inception_4a_3x3 = self.relu_inception_4a_3x3(inception_4a_3x3)
    relu_inception_4a_5x5 = self.relu_inception_4a_5x5(inception_4a_5x5)
    inception_4a_output = self.inception_4a_output(
        (relu_inception_4a_1x1, relu_inception_4a_3x3,
         relu_inception_4a_5x5, relu_inception_4a_pool_proj), 1)
    # Auxiliary classifier 1 (loss1_*) disabled in this variant:
    #loss1_ave_pool = F.avg_pool2d(inception_4a_output, kernel_size=(5, 5), stride=(3, 3), padding=(0,), ceil_mode=True, count_include_pad=False)
    # --- Inception 4b
    inception_4b_3x3_reduce = self.inception_4b_3x3_reduce(
        inception_4a_output)
    inception_4b_1x1 = self.inception_4b_1x1(inception_4a_output)
    inception_4b_pool_pad = F.pad(inception_4a_output, (1, 1, 1, 1), value=float('-inf'))
    inception_4b_pool = self.inception_4b_pool(inception_4b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4b_5x5_reduce = self.inception_4b_5x5_reduce(
        inception_4a_output)
    #loss1_conv = self.loss1_conv(loss1_ave_pool)
    inception_4b_relu_3x3_reduce = self.inception_4b_relu_3x3_reduce(
        inception_4b_3x3_reduce)
    inception_4b_relu_1x1 = self.inception_4b_relu_1x1(inception_4b_1x1)
    inception_4b_pool_proj = self.inception_4b_pool_proj(inception_4b_pool)
    inception_4b_relu_5x5_reduce = self.inception_4b_relu_5x5_reduce(
        inception_4b_5x5_reduce)
    #loss1_relu_conv = F.relu(loss1_conv)
    inception_4b_3x3_pad = F.pad(inception_4b_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4b_3x3 = self.inception_4b_3x3(inception_4b_3x3_pad)
    inception_4b_relu_pool_proj = self.inception_4b_relu_pool_proj(
        inception_4b_pool_proj)
    inception_4b_5x5_pad = F.pad(inception_4b_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4b_5x5 = self.inception_4b_5x5(inception_4b_5x5_pad)
    #loss1_fc_0 = loss1_relu_conv.view(loss1_relu_conv.size(0), -1)
    inception_4b_relu_3x3 = self.inception_4b_relu_3x3(inception_4b_3x3)
    inception_4b_relu_5x5 = self.inception_4b_relu_5x5(inception_4b_5x5)
    #loss1_fc_1 = self.loss1_fc_1(loss1_fc_0)
    inception_4b_output = self.inception_4b_output(
        (inception_4b_relu_1x1, inception_4b_relu_3x3,
         inception_4b_relu_5x5, inception_4b_relu_pool_proj), 1)
    #loss1_relu_fc = F.relu(loss1_fc_1)
    # --- Inception 4c
    inception_4c_pool_pad = F.pad(inception_4b_output, (1, 1, 1, 1), value=float('-inf'))
    inception_4c_pool = self.inception_4c_pool(inception_4c_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4c_1x1 = self.inception_4c_1x1(inception_4b_output)
    inception_4c_5x5_reduce = self.inception_4c_5x5_reduce(
        inception_4b_output)
    inception_4c_3x3_reduce = self.inception_4c_3x3_reduce(
        inception_4b_output)
    #loss1_drop_fc = F.dropout(input = loss1_relu_fc, p = 0.699999988079071, training = self.training, inplace = True)
    inception_4c_pool_proj = self.inception_4c_pool_proj(inception_4c_pool)
    inception_4c_relu_1x1 = self.inception_4c_relu_1x1(inception_4c_1x1)
    inception_4c_relu_5x5_reduce = self.inception_4c_relu_5x5_reduce(
        inception_4c_5x5_reduce)
    inception_4c_relu_3x3_reduce = self.inception_4c_relu_3x3_reduce(
        inception_4c_3x3_reduce)
    #loss1_classifier_model_0 = loss1_drop_fc.view(loss1_drop_fc.size(0), -1)
    inception_4c_relu_pool_proj = self.inception_4c_relu_pool_proj(
        inception_4c_pool_proj)
    inception_4c_5x5_pad = F.pad(inception_4c_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4c_5x5 = self.inception_4c_5x5(inception_4c_5x5_pad)
    inception_4c_3x3_pad = F.pad(inception_4c_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4c_3x3 = self.inception_4c_3x3(inception_4c_3x3_pad)
    #loss1_classifier_model_1 = self.loss1_classifier_model_1(loss1_classifier_model_0)
    inception_4c_relu_5x5 = self.inception_4c_relu_5x5(inception_4c_5x5)
    inception_4c_relu_3x3 = self.inception_4c_relu_3x3(inception_4c_3x3)
    inception_4c_output = self.inception_4c_output(
        (inception_4c_relu_1x1, inception_4c_relu_3x3,
         inception_4c_relu_5x5, inception_4c_relu_pool_proj), 1)
    # --- Inception 4d
    inception_4d_1x1 = self.inception_4d_1x1(inception_4c_output)
    inception_4d_3x3_reduce = self.inception_4d_3x3_reduce(
        inception_4c_output)
    inception_4d_5x5_reduce = self.inception_4d_5x5_reduce(
        inception_4c_output)
    inception_4d_pool_pad = F.pad(inception_4c_output, (1, 1, 1, 1), value=float('-inf'))
    inception_4d_pool = self.inception_4d_pool(inception_4d_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4d_relu_1x1 = self.inception_4d_relu_1x1(inception_4d_1x1)
    inception_4d_relu_3x3_reduce = self.inception_4d_relu_3x3_reduce(
        inception_4d_3x3_reduce)
    inception_4d_relu_5x5_reduce = self.inception_4d_relu_5x5_reduce(
        inception_4d_5x5_reduce)
    inception_4d_pool_proj = self.inception_4d_pool_proj(inception_4d_pool)
    inception_4d_3x3_pad = F.pad(inception_4d_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4d_3x3 = self.inception_4d_3x3(inception_4d_3x3_pad)
    inception_4d_5x5_pad = F.pad(inception_4d_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4d_5x5 = self.inception_4d_5x5(inception_4d_5x5_pad)
    inception_4d_relu_pool_proj = self.inception_4d_relu_pool_proj(
        inception_4d_pool_proj)
    inception_4d_relu_3x3 = self.inception_4d_relu_3x3(inception_4d_3x3)
    inception_4d_relu_5x5 = self.inception_4d_relu_5x5(inception_4d_5x5)
    inception_4d_output = self.inception_4d_output(
        (inception_4d_relu_1x1, inception_4d_relu_3x3,
         inception_4d_relu_5x5, inception_4d_relu_pool_proj), 1)
    # Auxiliary classifier 2 (loss2_*) disabled in this variant:
    #loss2_ave_pool = F.avg_pool2d(inception_4d_output, kernel_size=(5, 5), stride=(3, 3), padding=(0,), ceil_mode=True, count_include_pad=False)
    # --- Inception 4e
    inception_4e_pool_pad = F.pad(inception_4d_output, (1, 1, 1, 1), value=float('-inf'))
    inception_4e_pool = self.inception_4e_pool(inception_4e_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_4e_1x1 = self.inception_4e_1x1(inception_4d_output)
    inception_4e_3x3_reduce = self.inception_4e_3x3_reduce(
        inception_4d_output)
    inception_4e_5x5_reduce = self.inception_4e_5x5_reduce(
        inception_4d_output)
    #loss2_conv = self.loss2_conv(loss2_ave_pool)
    inception_4e_pool_proj = self.inception_4e_pool_proj(inception_4e_pool)
    inception_4e_relu_1x1 = self.inception_4e_relu_1x1(inception_4e_1x1)
    inception_4e_relu_3x3_reduce = self.inception_4e_relu_3x3_reduce(
        inception_4e_3x3_reduce)
    inception_4e_relu_5x5_reduce = self.inception_4e_relu_5x5_reduce(
        inception_4e_5x5_reduce)
    #loss2_relu_conv = F.relu(loss2_conv)
    inception_4e_relu_pool_proj = self.inception_4e_relu_pool_proj(
        inception_4e_pool_proj)
    inception_4e_3x3_pad = F.pad(inception_4e_relu_3x3_reduce, (1, 1, 1, 1))
    inception_4e_3x3 = self.inception_4e_3x3(inception_4e_3x3_pad)
    inception_4e_5x5_pad = F.pad(inception_4e_relu_5x5_reduce, (2, 2, 2, 2))
    inception_4e_5x5 = self.inception_4e_5x5(inception_4e_5x5_pad)
    #loss2_fc_0 = loss2_relu_conv.view(loss2_relu_conv.size(0), -1)
    inception_4e_relu_3x3 = self.inception_4e_relu_3x3(inception_4e_3x3)
    inception_4e_relu_5x5 = self.inception_4e_relu_5x5(inception_4e_5x5)
    #loss2_fc_1 = self.loss2_fc_1(loss2_fc_0)
    inception_4e_output = self.inception_4e_output(
        (inception_4e_relu_1x1, inception_4e_relu_3x3,
         inception_4e_relu_5x5, inception_4e_relu_pool_proj), 1)
    #loss2_relu_fc = F.relu(loss2_fc_1)
    # --- pool4 then Inception 5a
    pool4_pad = F.pad(inception_4e_output, (0, 1, 0, 1), value=float('-inf'))
    pool4 = self.pool4(pool4_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    #loss2_drop_fc = F.dropout(input = loss2_relu_fc, p = 0.699999988079071, training = self.training, inplace = True)
    inception_5a_pool_pad = F.pad(pool4, (1, 1, 1, 1), value=float('-inf'))
    inception_5a_pool = self.inception_5a_pool(inception_5a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_5a_5x5_reduce = self.inception_5a_5x5_reduce(pool4)
    inception_5a_3x3_reduce = self.inception_5a_3x3_reduce(pool4)
    inception_5a_1x1 = self.inception_5a_1x1(pool4)
    #loss2_classifier_model_0 = loss2_drop_fc.view(loss2_drop_fc.size(0), -1)
    inception_5a_pool_proj = self.inception_5a_pool_proj(inception_5a_pool)
    inception_5a_relu_5x5_reduce = self.inception_5a_relu_5x5_reduce(
        inception_5a_5x5_reduce)
    inception_5a_relu_3x3_reduce = self.inception_5a_relu_3x3_reduce(
        inception_5a_3x3_reduce)
    inception_5a_relu_1x1 = self.inception_5a_relu_1x1(inception_5a_1x1)
    #loss2_classifier_model_1 = self.loss2_classifier_model_1(loss2_classifier_model_0)
    inception_5a_relu_pool_proj = self.inception_5a_relu_pool_proj(
        inception_5a_pool_proj)
    inception_5a_5x5_pad = F.pad(inception_5a_relu_5x5_reduce, (2, 2, 2, 2))
    inception_5a_5x5 = self.inception_5a_5x5(inception_5a_5x5_pad)
    inception_5a_3x3_pad = F.pad(inception_5a_relu_3x3_reduce, (1, 1, 1, 1))
    inception_5a_3x3 = self.inception_5a_3x3(inception_5a_3x3_pad)
    inception_5a_relu_5x5 = self.inception_5a_relu_5x5(inception_5a_5x5)
    inception_5a_relu_3x3 = self.inception_5a_relu_3x3(inception_5a_3x3)
    inception_5a_output = self.inception_5a_output(
        (inception_5a_relu_1x1, inception_5a_relu_3x3,
         inception_5a_relu_5x5, inception_5a_relu_pool_proj), 1)
    # --- Inception 5b
    inception_5b_3x3_reduce = self.inception_5b_3x3_reduce(
        inception_5a_output)
    inception_5b_1x1 = self.inception_5b_1x1(inception_5a_output)
    inception_5b_5x5_reduce = self.inception_5b_5x5_reduce(
        inception_5a_output)
    inception_5b_pool_pad = F.pad(inception_5a_output, (1, 1, 1, 1), value=float('-inf'))
    inception_5b_pool = self.inception_5b_pool(inception_5b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    inception_5b_relu_3x3_reduce = self.inception_5b_relu_3x3_reduce(
        inception_5b_3x3_reduce)
    inception_5b_relu_1x1 = self.inception_5b_relu_1x1(inception_5b_1x1)
    inception_5b_relu_5x5_reduce = self.inception_5b_relu_5x5_reduce(
        inception_5b_5x5_reduce)
    inception_5b_pool_proj = self.inception_5b_pool_proj(inception_5b_pool)
    inception_5b_3x3_pad = F.pad(inception_5b_relu_3x3_reduce, (1, 1, 1, 1))
    inception_5b_3x3 = self.inception_5b_3x3(inception_5b_3x3_pad)
    inception_5b_5x5_pad = F.pad(inception_5b_relu_5x5_reduce, (2, 2, 2, 2))
    inception_5b_5x5 = self.inception_5b_5x5(inception_5b_5x5_pad)
    inception_5b_relu_pool_proj = self.inception_5b_relu_pool_proj(
        inception_5b_pool_proj)
    inception_5b_relu_3x3 = self.inception_5b_relu_3x3(inception_5b_3x3)
    inception_5b_relu_5x5 = self.inception_5b_relu_5x5(inception_5b_5x5)
    inception_5b_output = self.inception_5b_output(
        (inception_5b_relu_1x1, inception_5b_relu_3x3,
         inception_5b_relu_5x5, inception_5b_relu_pool_proj), 1)
    # --- Head: global 7x7 average pool + dropout (p is float32 rounding of 0.4)
    pool5 = F.avg_pool2d(inception_5b_output, kernel_size=(7, 7), stride=(1, 1), padding=(0, ), ceil_mode=False, count_include_pad=False)
    pool5_drop = self.pool5_drop(input=pool5, p=0.4000000059604645, training=self.training, inplace=True)
    return pool5_drop