Example #1
 def __init__(self, num_channels, eps=1e-5, momentum=0.1):
     super(myBN, self).__init__()
     # momentum for updating running stats
     self.momentum = momentum
     # epsilon to avoid dividing by 0
     self.eps = eps
     # Register buffers for running stats (persisted in state_dict, not trained)
     self.register_buffer('stored_mean', torch.zeros(num_channels))
     self.register_buffer('stored_var', torch.ones(num_channels))
     self.register_buffer('accumulation_counter', torch.zeros(1))
     # Accumulate running means and vars
     self.accumulate_standing = False
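The "Register buffers" comment is the crux here: a buffer is saved in the module's state_dict and moved by .to()/.cuda(), but it is not returned by parameters(), so the optimizer never touches the running statistics. A minimal standalone sketch (not from the original source) showing the difference:

import torch

class Stats(torch.nn.Module):
    def __init__(self, n):
        super().__init__()
        # Trained by the optimizer
        self.weight = torch.nn.Parameter(torch.ones(n))
        # Persisted and device-moved, but never trained
        self.register_buffer('stored_mean', torch.zeros(n))

m = Stats(4)
print([name for name, _ in m.named_parameters()])  # ['weight']
print(sorted(m.state_dict().keys()))               # ['stored_mean', 'weight']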
Example #2
    def __init__(self,
                 output_size,
                 eps=1e-5,
                 momentum=0.1,
                 cross_replica=False,
                 mybn=False):
        super(bn, self).__init__()
        self.output_size = output_size
        # Prepare gain and bias parameters
        self.gain = torch.nn.Parameter(torch.ones(output_size), requires_grad=True)
        self.bias = torch.nn.Parameter(torch.zeros(output_size), requires_grad=True)
        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum
        # Use cross-replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn

        if self.cross_replica:
            self.bn = SyncBN2d(output_size,
                               eps=self.eps,
                               momentum=self.momentum,
                               affine=False)
        elif mybn:
            self.bn = myBN(output_size, self.eps, self.momentum)
        # Register buffers if neither of the above
        else:
            self.register_buffer('stored_mean', torch.zeros(output_size))
            self.register_buffer('stored_var', torch.ones(output_size))
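This wrapper picks one of three backends at construction time: cross-replica (synchronized) batch norm, the hand-rolled myBN above, or plain stored statistics. A hedged instantiation sketch, assuming the full BigGAN-style class (including its forward) and SyncBN2d are in scope:

bn_default = bn(output_size=64)       # registers stored_mean / stored_var buffers
bn_sync = bn(64, cross_replica=True)  # delegates to SyncBN2d
bn_manual = bn(64, mybn=True)         # delegates to myBN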
Example #3
 def __init__(self,
              num_features,
              eps=1e-5,
              momentum=0.1,
              affine=True,
              track_running_stats=True):
     super(_BatchNormBase, self).__init__()
     self.num_features = num_features
     self.eps = eps
     self.momentum = momentum
     self.affine = affine
     self.track_running_stats = track_running_stats
     if self.affine:
          self.weight = Parameter(torch.ones(num_features))
          self.bias = Parameter(torch.zeros(num_features))
     else:
         self.register_parameter('weight', None)
         self.register_parameter('bias', None)
     if self.track_running_stats:
         self.register_buffer('running_mean', torch.zeros(num_features))
         self.register_buffer('running_var', torch.ones(num_features))
         self.register_buffer('num_batches_tracked',
                              torch.tensor(0, dtype=torch.long))
     else:
         self.register_parameter('running_mean', None)
         self.register_parameter('running_var', None)
         self.register_parameter('num_batches_tracked', None)
     self.reset_parameters()
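This mirrors the parameter/buffer split used by PyTorch's built-in batch norm layers, which can be checked directly against torch.nn.BatchNorm1d:

import torch

layer = torch.nn.BatchNorm1d(3)
print([n for n, _ in layer.named_parameters()])  # ['weight', 'bias']
print([n for n, _ in layer.named_buffers()])     # ['running_mean', 'running_var', 'num_batches_tracked']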
Example #4
    def _preprocess(self, edge_list_path, node_label_path):
        with open(edge_list_path) as f:
            edge_list = []
            node2id = defaultdict(int)
            for line in f:
                x, y = list(map(int, line.split()))
                # Reindex
                if x not in node2id:
                    node2id[x] = len(node2id)
                if y not in node2id:
                    node2id[y] = len(node2id)
                edge_list.append([node2id[x], node2id[y]])
                edge_list.append([node2id[y], node2id[x]])

        num_nodes = len(node2id)
        with open(node_label_path) as f:
            nodes = []
            labels = []
            label2id = defaultdict(int)
            for line in f:
                x, label = list(map(int, line.split()))
                if label not in label2id:
                    label2id[label] = len(label2id)
                nodes.append(node2id[x])
                if "hindex" in self.name:
                    labels.append(label)
                else:
                    labels.append(label2id[label])
            if "hindex" in self.name:
                median = np.median(labels)
                labels = [int(label > median) for label in labels]
        assert num_nodes == len(set(nodes))
        y = torch.zeros(num_nodes, len(label2id))
        y[nodes, labels] = 1
        return torch.LongTensor(edge_list).t(), y, node2id
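The reindexing idiom above hands every previously unseen id the next consecutive integer, so arbitrary node ids are compacted to 0..N-1; and because membership is checked before insertion, a plain dict behaves exactly like the defaultdict here. A standalone sketch of the pattern:

node2id = {}
for raw_id in [17, 4, 17, 99]:
    if raw_id not in node2id:
        node2id[raw_id] = len(node2id)
print(node2id)  # {17: 0, 4: 1, 99: 2}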
Example #5
def _rwr_trace_to_dgl_graph(g,
                            seed,
                            trace,
                            positional_embedding_size,
                            entire_graph=False):
    # Unique nodes visited across the random-walk-with-restart traces
    subv = torch.unique(torch.cat(trace)).detach().cpu().numpy().tolist()
    # Move the seed to the front so it gets local index 0 in the subgraph
    try:
        subv.remove(seed)
    except ValueError:
        pass
    subv = [seed] + subv
    if entire_graph:
        subg = g.subgraph(g.nodes())
    else:
        subg = g.subgraph(subv)

    subg = _add_undirected_graph_positional_embedding(
        subg, positional_embedding_size)

    subg.ndata["seed"] = torch.zeros(subg.number_of_nodes(), dtype=torch.long)
    if entire_graph:
        subg.ndata["seed"][seed] = 1
    else:
        subg.ndata["seed"][0] = 1
    return subg
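Note the ordering step: the seed is removed from the sorted unique trace nodes and prepended, which is why the non-entire_graph branch can simply mark subg.ndata["seed"][0]. A pure-torch sketch of just that step:

import torch

trace = [torch.tensor([3, 5, 2]), torch.tensor([5, 7])]
seed = 5
subv = torch.unique(torch.cat(trace)).tolist()  # sorted: [2, 3, 5, 7]
subv.remove(seed)
subv = [seed] + subv
print(subv)  # [5, 2, 3, 7] -- seed always at local index 0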
Example #6
import numpy as np
import scipy.sparse as sparse
import torch
import torch.nn.functional as F
from scipy.sparse import linalg
from sklearn import preprocessing


def eigen_decomposision(n, k, laplacian, hidden_size, retry):
    if k <= 0:
        return torch.zeros(n, hidden_size)
    laplacian = laplacian.astype("float64")
    ncv = min(n, max(2 * k + 1, 20))
    # follows https://stackoverflow.com/questions/52386942/scipy-sparse-linalg-eigsh-with-fixed-seed
    v0 = np.random.rand(n).astype("float64")
    for i in range(retry):
        try:
            s, u = linalg.eigsh(laplacian, k=k, which="LA", ncv=ncv, v0=v0)
        except sparse.linalg.eigen.arpack.ArpackError:
            # print("arpack error, retry=", i)
            ncv = min(ncv * 2, n)
            if i + 1 == retry:
                sparse.save_npz("arpack_error_sparse_matrix.npz", laplacian)
                u = torch.zeros(n, k)
        else:
            break
    x = preprocessing.normalize(u, norm="l2")
    x = torch.from_numpy(x.astype("float32"))
    x = F.pad(x, (0, hidden_size - k), "constant", 0)
    return x
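A hedged usage sketch, reusing the imports above: build the normalized Laplacian of a small random graph with SciPy and request its top-k eigenvectors, zero-padded out to hidden_size (the graph construction here is illustrative, not from the original module):

from scipy.sparse.csgraph import laplacian as csgraph_laplacian

n, k, hidden_size = 10, 4, 16
adj = sparse.random(n, n, density=0.3, format="csr")
adj = adj + adj.T  # symmetrize so eigsh gets a symmetric matrix
lap = csgraph_laplacian(adj, normed=True)
x = eigen_decomposision(n, k, lap, hidden_size, retry=10)
print(x.shape)  # torch.Size([10, 16])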
Example #7
    def __init__(
        self,
        output_size,
        input_size,
        which_linear,
        eps=1e-5,
        momentum=0.1,
        cross_replica=False,
        mybn=False,
        norm_style='bn',
    ):
        super(ccbn, self).__init__()
        self.output_size, self.input_size = output_size, input_size
        # Prepare gain and bias layers
        self.gain = which_linear(input_size, output_size)
        self.bias = which_linear(input_size, output_size)
        # epsilon to avoid dividing by 0
        self.eps = eps
        # Momentum
        self.momentum = momentum
        # Use cross-replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # Norm style?
        self.norm_style = norm_style

        if self.cross_replica:
            self.bn = SyncBN2d(output_size,
                               eps=self.eps,
                               momentum=self.momentum,
                               affine=False)
        elif self.mybn:
            self.bn = myBN(output_size, self.eps, self.momentum)
        elif self.norm_style in ['bn', 'in']:
            # Register buffers rather than Parameters so running stats are not trained
            self.register_buffer('stored_mean', torch.zeros(output_size))
            self.register_buffer('stored_var', torch.ones(output_size))
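Unlike the unconditional bn above, ccbn (class-conditional batch norm) computes its gain and bias from a conditioning vector: which_linear is a linear-layer constructor mapping input_size conditioning features to output_size per-channel gains and biases. A hedged instantiation sketch, assuming the full BigGAN-style class is in scope:

import torch.nn as nn

layer = ccbn(output_size=64, input_size=128, which_linear=nn.Linear)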
Example #8
 def forward(self, x):
     bsz = x.shape[0]
     x = x.squeeze()
     # Target class 0 for every sample: the positive logit sits at index 0
     label = torch.zeros([bsz]).cuda().long()
     loss = self.criterion(x, label)
     return loss
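If x has shape [bsz, 1 + num_negatives] and self.criterion is a cross-entropy loss, the all-zeros label simply says the positive logit is stored in column 0 of every row. A CPU sketch of the same computation (the original's .cuda() assumes a GPU):

import torch
import torch.nn as nn

x = torch.randn(8, 1 + 16)                # 1 positive + 16 negative logits per sample
label = torch.zeros(8, dtype=torch.long)  # positive always at index 0
loss = nn.CrossEntropyLoss()(x, label)
print(loss.item())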
Example #9
        # (excerpt from GraphEncoder.forward; n_feat is computed from g.ndata earlier)
        e_feat = None
        if self.gnn_model == "gin":
            x, all_outputs = self.gnn(g, n_feat, e_feat)
        else:
            x, all_outputs = self.gnn(g, n_feat, e_feat), None
            x = self.set2set(g, x)
            x = self.lin_readout(x)
        if self.norm:
            x = F.normalize(x, p=2, dim=-1, eps=1e-5)
        if return_all_outputs:
            return x, all_outputs
        else:
            return x


if __name__ == "__main__":
    model = GraphEncoder(gnn_model="gin")
    print(model)
    g = dgl.DGLGraph()
    g.add_nodes(3)
    g.add_edges([0, 0, 1, 2], [1, 2, 2, 1])
    g.ndata["pos_directed"] = torch.rand(3, 16)
    g.ndata["pos_undirected"] = torch.rand(3, 16)
    g.ndata["seed"] = torch.zeros(3, dtype=torch.long)
    g.ndata["nfreq"] = torch.ones(3, dtype=torch.long)
    g.edata["efreq"] = torch.ones(4, dtype=torch.long)
    g = dgl.batch([g, g, g])
    y = model(g)
    print(y.shape)
    print(y)
Example #10
import paddorch
from paddorch import index_copy_inplace_nograd

memory = paddorch.zeros((4, 3))
k = paddorch.arange(0, 6).view(2, 3)
out_ids = paddorch.LongTensor([1, 3])

index_copy_inplace_nograd(memory, 0, out_ids, k)
print("paddorch", memory)

import torch

memory = torch.zeros((4, 3))
k = torch.arange(0, 6).view(2, 3).float()
out_ids = torch.LongTensor([1, 3])

memory.index_copy_(0, out_ids, k)
print("pytorch", memory)