def forward(self, x):
    # x: N x C x H x W
    batch_size, C, h, w = x.shape
    h1 = 1000
    w1 = 1000
    # Crop a centered region of at most h1 x w1 to bound the cost of covariance pooling
    if h <= h1 and w <= w1:
        x_sub = x
    elif h <= h1:  # only the width exceeds the crop size
        W = (w - w1) // 2
        x_sub = x[:, :, :, W:W + w1]
    elif w <= w1:  # only the height exceeds the crop size
        H = (h - h1) // 2
        x_sub = x[:, :, H:H + h1, :]
    else:          # both dimensions exceed the crop size
        H = (h - h1) // 2
        W = (w - w1) // 2
        x_sub = x[:, :, H:H + h1, W:W + w1]
    # MPN-COV: global covariance pooling, then the matrix square root layer
    # (pre-normalization, 5 Newton-Schulz iterations, post-compensation)
    cov_mat = MPNCOV.CovpoolLayer(x_sub)
    cov_mat_sqrt = MPNCOV.SqrtmLayer(cov_mat, 5)
    # Channel-wise mean of the covariance square root drives the attention weights
    cov_mat_sum = torch.mean(cov_mat_sqrt, 1)
    cov_mat_sum = cov_mat_sum.view(batch_size, C, 1, 1)
    y_cov = self.conv_du(cov_mat_sum)
    return y_cov * x
def forward(self, x):
    b, c, h, w = x.shape
    h1 = 1000
    w1 = 1000
    # Crop a centered region of at most h1 x w1 before covariance pooling
    if h <= h1 and w <= w1:
        x_sub = x
    elif h <= h1:
        W = (w - w1) // 2
        x_sub = x[:, :, :, W:W + w1]
    elif w <= w1:
        H = (h - h1) // 2
        x_sub = x[:, :, H:H + h1, :]
    else:
        H = (h - h1) // 2
        W = (w - w1) // 2
        x_sub = x[:, :, H:H + h1, W:W + w1]
    # MPN-COV: covariance pooling followed by the matrix square root (5 Newton-Schulz iterations)
    cov_mat = MPNCOV.CovpoolLayer(x_sub)
    cov_mat_sqrt = MPNCOV.SqrtmLayer(cov_mat, 5)
    cov_mat_sum = torch.mean(cov_mat_sqrt, 1)
    cov_mat_sum = cov_mat_sum.view(b, c, 1, 1)
    y_cov = self.conv_du(cov_mat_sum)
    return y_cov * x
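# NOTE: self.conv_du used above is defined in the module's __init__, which is not shown
# in these snippets. The helper below is only a sketch of a compatible definition, not
# the original one: a squeeze-and-excitation-style bottleneck (1x1 conv -> ReLU ->
# 1x1 conv -> Sigmoid) mapping the (b, c, 1, 1) second-order channel statistic to
# per-channel attention weights. `channel` and `reduction` are hypothetical parameters.
import torch.nn as nn

def make_conv_du(channel, reduction=16):
    return nn.Sequential(
        nn.Conv2d(channel, channel // reduction, kernel_size=1, bias=True),
        nn.ReLU(inplace=True),
        nn.Conv2d(channel // reduction, channel, kernel_size=1, bias=True),
        nn.Sigmoid(),
    )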
def forward(self, input):
    hpf_weights = F.softmax(self.alphas_hpf, dim=-1)
    s0 = self.stem(input, hpf_weights)
    s1 = s0
    for i, cell in enumerate(self.cells):
        # Architecture weights: alphas select operations, betas weight the input edges
        if cell.reduction:
            weights = F.softmax(self.alphas_reduce, dim=-1)
            n = 3
            start = 2
            weights2 = F.softmax(self.betas_reduce[0:2], dim=-1)
            for _ in range(self._steps - 1):
                end = start + n
                tw2 = F.softmax(self.betas_reduce[start:end], dim=-1)
                start = end
                n += 1
                weights2 = torch.cat([weights2, tw2], dim=0)
        else:
            weights = F.softmax(self.alphas_normal, dim=-1)
            n = 3
            start = 2
            weights2 = F.softmax(self.betas_normal[0:2], dim=-1)
            for _ in range(self._steps - 1):
                end = start + n
                tw2 = F.softmax(self.betas_normal[start:end], dim=-1)
                start = end
                n += 1
                weights2 = torch.cat([weights2, tw2], dim=0)
        s0, s1 = s1, cell(s0, s1, weights, weights2)
    # Global covariance pooling in place of global average pooling
    # out = self.global_pooling(s1)
    out = MPNCOV.CovpoolLayer(s1)
    out = MPNCOV.SqrtmLayer(out, 5)
    out = MPNCOV.TriuvecLayer(out)
    logits = self.classifier(out.view(out.size(0), -1))
    return logits
def forward(self, input):
    output = self.group1(input)
    output = self.group2(output)
    output = self.group3(output)
    output = self.group4(output)
    output = self.group5(output)
    # Global covariance pooling
    output = MPNCOV.CovpoolLayer(output)
    output = MPNCOV.SqrtmLayer(output, 5)
    output = MPNCOV.TriuvecLayer(output)
    output = output.view(output.size(0), -1)
    output = self.fc1(output)
    return output
def forward(self, input):
    logits_aux = None
    s0 = self.stem0(input)
    s1 = s0
    for i, cell in enumerate(self.cells):
        s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
        if i == 2 * self._layers // 3:
            if self._auxiliary and self.training:
                logits_aux = self.auxiliary_head(s1)
    # Global covariance pooling in place of global average pooling
    # out = self.global_pooling(s1)
    out = MPNCOV.CovpoolLayer(s1)
    out = MPNCOV.SqrtmLayer(out, 5)
    out = MPNCOV.TriuvecLayer(out)
    logits = self.classifier(out.view(out.size(0), -1))
    return logits, logits_aux
def forward(self, x):
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    # 1x1 conv. for channel dimension reduction
    x = self.layer_reduce(x)
    x = self.layer_reduce_bn(x)
    x = self.layer_reduce_relu(x)
    # Global covariance pooling: covariance, matrix square root, upper-triangular vectorization
    x = MPNCOV.CovpoolLayer(x)
    x = MPNCOV.SqrtmLayer(x, 5)
    x = MPNCOV.TriuvecLayer(x)
    x = x.view(x.size(0), -1)
    x = self.fc(x)
    return x
def forward(self, x):
    # Covariance pooling only: return the C x C channel covariance matrix
    cov_mat = MPNCOV.CovpoolLayer(x)
    return cov_mat
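# Minimal shape walkthrough of the MPN-COV pipeline used in the forwards above.
# This is a sketch for reference only: it assumes the fast MPN-COV layers imported
# as MPNCOV, exactly as in the snippets, and illustrative tensor sizes.
import torch

x = torch.randn(2, 64, 8, 8)              # N x C x H x W feature map
cov = MPNCOV.CovpoolLayer(x)              # N x C x C channel covariance matrices
cov_sqrt = MPNCOV.SqrtmLayer(cov, 5)      # matrix square root via 5 Newton-Schulz iterations
feat = MPNCOV.TriuvecLayer(cov_sqrt)      # upper-triangular entries: C*(C+1)/2 values per sample
feat = feat.view(feat.size(0), -1)        # flatten before a linear classifier, as above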