Example #1
def stdv_channels(F):
    # Per-channel standard deviation over the spatial dimensions of a 4D (N, C, H, W) tensor.
    assert (F.dim() == 4)
    F_mean = mean_channels(F)
    F_variance = (F - F_mean).pow(2).sum(3, keepdim=True).sum(
        2, keepdim=True) / (F.size(2) * F.size(3))
    return F_variance.pow(0.5)
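A minimal usage sketch (assuming `mean_channels` from Example #11 is in scope):

import torch

feat = torch.randn(2, 8, 16, 16)       # (N, C, H, W) feature map
per_channel_std = stdv_channels(feat)  # shape (2, 8, 1, 1)
print(per_channel_std.shape)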
Example #2
def spectral_clustering(F, K=10, clusters=10, Niters=10, sigma=1):
    '''
    Input:
        Sample features F, N x D
        K: number of eigenvectors for K-Means clustering
        clusters: number of K-Means clusters
        Niters: number of iterations for K-Means clustering
        sigma: kernel width parameter passed to kway_normcuts
    Output:
        cl: cluster label for each sample, N x 1
        c: centroids of each cluster, clusters x D
    '''
    # Get K eigenvectors with K-way normalized cuts
    k_eigvec = kway_normcuts(F, K=K, sigma=sigma)

    # Spectral embedding with K eigenvectors
    cl, _ = KMeans(k_eigvec, K=clusters, Niters=Niters, verbose=False)

    # Get unique labels and the number of samples in each cluster
    Ncl = cl.view(cl.size(0), 1).expand(-1, F.size(1))
    unique_labels, labels_count = Ncl.unique(dim=0, return_counts=True)

    # Some clusters may be empty; keep their count at 1 to avoid division by zero
    labels_count_all = torch.ones([clusters]).long().cuda()
    labels_count_all[unique_labels[:, 0]] = labels_count

    # Calculate feature centroids
    c = torch.zeros([clusters, F.size(1)],
                    dtype=torch.float).cuda().scatter_add_(0, Ncl, F)
    c = c / labels_count_all.float().unsqueeze(1)

    return cl, c
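A hypothetical call, assuming the project-specific helpers `kway_normcuts` and `KMeans` are in scope and a CUDA device is available (the function allocates tensors with `.cuda()`):

import torch

feats = torch.randn(500, 64).cuda()   # N x D sample features
cl, c = spectral_clustering(feats, K=10, clusters=10, Niters=20, sigma=1)
print(cl.shape, c.shape)              # one cluster label per sample, and (clusters x D) centroids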
Example #3
def flux(u):
    # Nested helper: evaluates the learned flux network on 3x2 stencils of u.
    # It uses `self.net`, so it is expected to be defined inside a method
    # (closing over `self`).
    F = torch.zeros((u.size(0) - 2, u.size(1) - 1))
    for i in range(F.size(0)):
        for j in range(F.size(1)):
            F_inp = u[i:i + 3, j:j + 2].reshape(6)
            F[i, j] = self.net.forward(F_inp.type(torch.float32))
    return F
Example #4
def rescaled_contrast_layer(F):
    assert (F.dim() == 4)
    F_mean = mean_channels(F)
    F_variance = (F - F_mean).pow(2).sum(3, keepdim=True).sum(
        2, keepdim=True) / (F.size(2) * F.size(3))
    # return F_mean / F_variance.pow(0.5)
    # return - F_mean + F_variance
    return -F_mean / F_variance.pow(0.5) + F_variance.pow(0.5)
Example #5
def flux(u):
    # Nested helper: same stencil evaluation as in Example #3, with the ratio
    # dt / min(dx, dy) appended as a seventh input feature. Uses `self` from the
    # enclosing scope.
    F = torch.zeros((u.size(0) - 2, u.size(1) - 1))
    for i in range(F.size(0)):
        for j in range(F.size(1)):
            F_inp = torch.zeros(7)
            F_inp[:-1] = u[i:i + 3, j:j + 2].reshape(6)
            F_inp[-1] = self.dt / np.min(
                (self.dx, self.dy)
            )  # DONE (up for testing): rescale self.dt to match dx, dy in the fine mesh (training data)
            F[i, j] = self.net.forward(F_inp.type(torch.float32))
    return F
Example #6
def inverseAug(F):
    # Undo the channel augmentation: subtract the second half of the AC channels
    # from the first half and keep the final DC channel.
    Cur_out = F.size()[1]
    assert Cur_out % 2 == 1
    AC_len = (Cur_out - 1) // 2  # integer division so the result can be used as an index
    DC = F[:, Cur_out - 1:Cur_out, :, :]
    AC = F[:, 0:AC_len, :, :] - F[:, AC_len:2 * AC_len, :, :]
    return torch.cat((AC, DC), 1)
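A toy shape check (a sketch; the channel layout is inferred from the slicing above: two paired AC halves followed by one DC channel):

import torch

F = torch.randn(1, 5, 4, 4)   # 2 + 2 AC channels and 1 DC channel
out = inverseAug(F)
print(out.shape)              # torch.Size([1, 3, 4, 4])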
Example #7
    def forward(self, x):
        x1 = self.layer0(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)

        x = x5

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        out = []
        cls_r = self.classifier(x)
        out.append(cls_r)

        if self.training:
            if self.stage == 1:
                F = x3
            elif self.stage == 2:
                F = x4
            elif self.stage == 3:
                F = x5
            else:
                raise RuntimeError("WHAT'S F**K")
            features = self.align_conv(F).permute(0, 2, 3, 1).view(
                F.size(0), self.size, self.dim)
            features = torch.matmul(features, features.transpose(1, 2))
            mask = torch.stack(
                [torch.eye(self.size) for _ in range(x.size(0))]).byte()
            features[mask] = 0
            features = features / self.dim
            out.append(features)
            out.append(F)

        return out
Example #8
def flux(u):
    # Nested helper: Godunov numerical flux between neighbouring cells, computed
    # in chunks of 10 rows. Uses `self.godunov_flux` from the enclosing scope.
    F = torch.zeros((u.size(0), u.size(1) - 1))
    for i in range(0, F.size(0), 10):
        F[i:i + 10, :] = self.godunov_flux(u[i:i + 10, :-1],
                                           u[i:i + 10, 1:]).reshape(
                                               F[i:i + 10, :].size())
    return F
Example #9
def forward(self, x, F):
    # Pool x down to F's spatial size, reshape both to (batch, mc, channels, H, W),
    # concatenate along the channel axis, and flatten back to 4D.
    feature_size = F.size(-1)
    x = nn.AdaptiveAvgPool2d(feature_size)(x)
    x = x.view(self.batch_size, self.mc, -1, feature_size, feature_size)
    F = F.view(self.batch_size, self.mc, -1, feature_size, feature_size)
    F_y = torch.cat([x, F], 2)
    # F_y = torch.transpose(F_y, 1, 2).contiguous()
    # F_y = torch.add(x, F)
    F_y = F_y.view(self.batch_size, -1, feature_size, feature_size)
    return F_y
Example #10
    def normalize_0_to_1(self, F):
        assert (F.dim() == 4)
        max_val = F.max()
        min_val = F.min()
        if max_val != min_val:
            F_normalized = (F - min_val) / (max_val - min_val)
        else:
            F_normalized = self.Tensor(F.size()).fill_(0)

        return F_normalized
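The core of the min-max rescaling can be sanity-checked standalone (a sketch that skips the constant-input branch, which the method handles via self.Tensor):

import torch

F = torch.rand(1, 3, 8, 8) * 10 - 5
F_norm = (F - F.min()) / (F.max() - F.min())
print(F_norm.min().item(), F_norm.max().item())   # ~0.0 and ~1.0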
Example #11
def mean_channels(F):
    # Per-channel spatial mean of a 4D (N, C, H, W) tensor.
    assert (F.dim() == 4)
    spatial_sum = F.sum(3, keepdim=True).sum(2, keepdim=True)
    return spatial_sum / (F.size(2) * F.size(3))
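A quick consistency check (a sketch; passing a tuple of dims to Tensor.mean requires a reasonably recent PyTorch):

import torch

feat = torch.randn(4, 3, 7, 5)
assert torch.allclose(mean_channels(feat), feat.mean(dim=(2, 3), keepdim=True))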