예제 #1
0
    def receive(self, *args):
        """Dispatch incoming signal(s) to the k-NN classifier.

        args is either (signal_m,) or (signal_m, signal_g_m); the second
        element is an optional auxiliary signal, passed as None when absent.
        Returns the result of util.knn(k, signal, labels, aux).
        """
        # The second positional argument is optional.
        signal_m = args[0]
        signal_g_m = args[1] if len(args) == 2 else None
        return util.knn(self.k, signal_m, self.labels, signal_g_m)


            
예제 #2
0
파일: model.py 프로젝트: jiahaowork/idam
    def forward(self, x):
        """Apply the five propagation layers over one shared 12-NN graph.

        The neighbour indices are computed once and reused by every layer.
        """
        neighbours = knn(x, k=12)
        for layer in (self.propogate1, self.propogate2, self.propogate3,
                      self.propogate4, self.propogate5):
            x = layer(x, neighbours)
        return x
예제 #3
0
    def forward(self, x):
        """Propagate point features along a fixed 12-NN graph.

        x: assumed [B, 3, N] per the original annotations — TODO confirm.
        Returns the last layer's output (assumed [B, emb_dims, N]).
        """
        # Nearest-neighbour indices, assumed [B, N, k] — computed once.
        nn_idx = knn(x, k=12)

        out = self.propogate1(x, nn_idx)
        out = self.propogate2(out, nn_idx)
        out = self.propogate3(out, nn_idx)
        out = self.propogate4(out, nn_idx)
        out = self.propogate5(out, nn_idx)
        return out
예제 #4
0
    def permatrix_best(self, x):
        """Build soft permutation matrices from local k-NN geometry.

        x: (bsize, num_feat, num_pts) feature tensor.
        Returns (neigh_indexs, permatrix): the flattened neighbour index
        vector (bsize * num_pts * k) and the per-neighbourhood matrices
        produced by projecting relative coordinates onto self.kernels
        and normalising with topkmax.
        """
        bsize, _, num_pts = x.size()

        # Offset each batch's neighbour indices so they address rows of
        # the flattened (bsize * num_pts, feat) feature matrix.
        neigh_indexs = knn(x, self.k)
        offset = torch.arange(0, bsize, device=x.device).view(-1, 1, 1) * num_pts
        neigh_indexs = (neigh_indexs + offset).view(-1)

        flat = x.permute(0, 2, 1).contiguous().view(bsize * num_pts, -1)
        gathered = flat[neigh_indexs, :].view(bsize * num_pts, self.k, -1)
        # Entry 0 appears to be the centre point itself — TODO confirm the
        # knn() ordering; features are expressed relative to it.
        centre = gathered[:, 0:1, :].expand_as(gathered)
        relative = gathered - centre

        permatrix = torch.matmul(relative, self.kernels) + self.one_padding
        permatrix = topkmax(permatrix)
        return neigh_indexs, permatrix
예제 #5
0
def get_graph_feature(x, k=20, idx=None):
    bsize = x.size(0)
    num_points = x.size(2)
    x = x.view(bsize, -1, num_points)
    if idx is None:
        idx = knn(x, k=k)   # (bsize, num_points, k)
    device = torch.device('cuda')

    idx_base = torch.arange(0, bsize, device=device).view(-1, 1, 1)*num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()

    x = x.transpose(2, 1).contiguous()   # (bsize, num_points, num_dims)  -> (bsize*num_points, num_dims) #   bsize * num_points * k + range(0, bsize*num_points)
    feature = x.view(bsize*num_points, -1)[idx, :]
    feature = feature.view(bsize, num_points, k, num_dims) 
    x = x.view(bsize, num_points, 1, num_dims).repeat(1, 1, k, 1)
    
    feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous()
  
    return feature
예제 #6
0
    def forward(self, x):
        """Classification forward pass: graph convs -> global pooling -> MLP head."""
        bsize = x.size(0)

        # (Disabled) input spatial-transform branch, kept for reference:
        # x0 = get_graph_feature(x, k=self.k)
        # t = self.transform_net(x0)
        # x = torch.bmm(x.transpose(2, 1), t).transpose(2, 1)

        # One wide neighbourhood shared by every conv layer.
        neigh_indexs = knn(x, self.k * self.num_layers)

        # Run the conv stack, keeping a copy of each output for the
        # dense skip concatenation below.  The first layer receives the
        # raw input as its feature argument.
        skips = []
        feature = x
        for conv in (self.conv1, self.conv2, self.conv3,
                     self.conv4, self.conv4a, self.conv4b):
            feature = conv(x, feature, neigh_indexs)
            skips.append(feature.clone())

        out = F.gelu(self.conv5(torch.cat(skips, dim=1)))

        # Global max and average pooling, concatenated per sample.
        pooled_max = F.adaptive_max_pool1d(out, 1).view(bsize, -1)
        pooled_avg = F.adaptive_avg_pool1d(out, 1).view(bsize, -1)
        out = torch.cat((pooled_max, pooled_avg), 1)

        # Classifier head with dropout between the linear layers.
        out = self.dp1(F.gelu(self.bn6(self.linear1(out))))
        out = self.dp2(F.gelu(self.bn7(self.linear2(out))))
        return self.linear3(out)
예제 #7
0
    def permatrix_lsa(self, x):
        """Predict per-neighbourhood permutation matrices from local geometry.

        x: (bsize, num_feat, num_pts) feature tensor.
        Returns (neigh_indexs, permatrix): flattened neighbour indices and
        a weighted blend of the learned bases in self.permatrix.
        """
        bsize, _, num_pts = x.size()

        # Flatten neighbour indices into the (bsize * num_pts, feat) matrix.
        neigh_indexs = knn(x, self.k)
        offset = torch.arange(0, bsize, device=x.device).view(-1, 1, 1) * num_pts
        neigh_indexs = (neigh_indexs + offset).view(-1)

        flat = x.permute(0, 2, 1).contiguous().view(bsize * num_pts, -1)
        neigh_feats = flat[neigh_indexs, :].view(bsize * num_pts, self.k, -1)
        # Entry 0 appears to be the centre point itself — TODO confirm the
        # knn() ordering.
        centre = neigh_feats[:, 0:1, :].expand_as(neigh_feats)
        relative = neigh_feats - centre
        dist = torch.norm(relative, dim=-1, keepdim=True)
        geom = torch.cat([centre, relative, dist], dim=-1).view(bsize * num_pts, -1)

        # Fourier-feature encoding: sin/cos of a random projection (self.B),
        # then an MLP + softmax yields mixing weights over the bases.
        proj = 2. * math.pi * geom @ self.B
        encoded = torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)
        weights = self.softmax(self.mlp(encoded))

        permatrix = torch.einsum('bi, ikt->bkt', weights, self.permatrix)
        return neigh_indexs, permatrix
예제 #8
0
    n = len(dataloader) * dataloader.batch_size
    print("Accuracy: ", correct / n)


if __name__ == '__main__':
    #constructDatasetCSV("../Signals/full_dataset/")
    #dataset = SignalDataset("../Signals/full_dataset/", "csv/dataset.csv", raw = False)
    dataset = TestDataset()

    # 80/10/10 train/val/test split.  The test size is computed as the
    # remainder so the three parts always sum to len(dataset): the old
    # [train, (n-train)//2, (n-train)//2] made random_split raise whenever
    # len(dataset) - train_size was odd.
    train_size = int(0.8 * len(dataset))
    val_size = (len(dataset) - train_size) // 2
    test_size = len(dataset) - train_size - val_size
    train_dataset, validation_dataset, test_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size, test_size])

    train(train_dataset, validation_dataset)

    # Extract latent features from the trained model and fit a 3-NN
    # predictor on them.
    dataloader = DataLoader(train_dataset,
                            batch_size=1,
                            shuffle=True,
                            collate_fn=collate)
    model = torch.load("models/model.pt")
    X, y = get_latent(dataloader, model)
    predictor = knn(X, y, 3)
    visualize(X, y, dataset.get_distinct_labels())

    # Evaluate the model and the k-NN predictor on the held-out test set.
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=True,
                                 collate_fn=collate)
    evaluate(test_dataloader)
    predict(predictor, model, test_dataloader)