Example 1
 def reset_parameters(self):
     glorot(self.weight)
     if cfg.gnn.self_msg == 'concat':
         glorot(self.weight_self)
     zeros(self.bias)
     self.cached_result = None
     self.cached_num_edges = None
Example 2
 def reset_parameters(self):
     glorot(self.att_msg)
     if self.task_channels is not None:
         glorot(self.att_task)
     zeros(self.bias)
     self.cached_result = None
     self.cached_num_edges = None
Example 3
    def __init__(self,
                 in_channels,
                 out_channels,
                 improved=False,
                 cached=False,
                 bias=True,
                 **kwargs):
        super(GCNConv, self).__init__()
        # super(GCNConv, self).__init__(aggr='add', **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        self.cached = cached

        self.weight = Parameter(torch.Tensor(in_channels, out_channels))

        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        glorot(self.weight)
        zeros(self.bias)
        self.cached_result = None
        self.cached_num_edges = None
Example 4
 def reset_parameters(self):
     # original initialization
     # uniform(self.weight.size(0), self.weight)
     # uniform(self.weight.size(0), self.bias)
     # change to new initialization
     glorot(self.weight)
     zeros(self.bias)
Example 5
 def reset_parameters(self):
     self.lin_l.reset_parameters()
     self.lin_r.reset_parameters()
     if self.lin_edge is not None:
         self.lin_edge.reset_parameters()
     glorot(self.att)
     zeros(self.bias)
Example 6
 def reset_parameters(self):
     self.lin_msg.reset_parameters()
     self.lin_self.reset_parameters()
     if self.in_edge_channels is not None:
         self.lin_edge.reset_parameters()
     if self.attention and self.attention_type == 'additive':
         glorot(self.att_msg)
Example 7
 def reset_parameters(self):
     # uniform(self.weight.size(0), self.weight)
     # uniform(self.weight.size(0), self.weight_id)
     # uniform(self.weight.size(0), self.bias)
     glorot(self.weight)
     glorot(self.weight_id)
     zeros(self.bias)
Example 8
    def __init__(self,
                 emb_dim,
                 aggr="add",
                 input_layer=False,
                 heads=3,
                 negative_slope=0.2):
        super(GSANConv, self).__init__()

        self.aggr = aggr

        self.emb_dim = emb_dim
        self.heads = heads
        self.negative_slope = negative_slope

        # multi-layer perceptron
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim),
                                       torch.nn.BatchNorm1d(emb_dim),
                                       torch.nn.ReLU(),
                                       torch.nn.Linear(emb_dim, emb_dim))

        ### Mapping 0/1 edge features to embedding
        self.edge_encoder = torch.nn.Linear(9, heads * emb_dim)

        ### Mapping uniform input features to embedding.
        self.input_layer = input_layer
        if self.input_layer:
            self.input_node_embeddings = torch.nn.Embedding(2, emb_dim)
            torch.nn.init.xavier_uniform_(
                self.input_node_embeddings.weight.data)

        self.att = torch.nn.Parameter(torch.Tensor(1, heads, 2 * emb_dim))
        self.bias = torch.nn.Parameter(torch.Tensor(emb_dim))
        glorot(self.att)
        zeros(self.bias)
Example 9
 def __init__(self, out_channels_dual):
     super(DualPrimalEdgeUnpooling, self).__init__()
     self.__out_channels_dual = out_channels_dual
     self.new_dual_feature = torch.nn.Parameter(
         torch.Tensor(1, out_channels_dual))
     # Initialize parameter.
     glorot(self.new_dual_feature)
Example 10
    def reset_parameters(self):
        if self.in_channels <= 0:
            pass
        elif self.weight_initializer == 'glorot':
            inits.glorot(self.weight)
        elif self.weight_initializer == 'uniform':
            bound = 1.0 / math.sqrt(self.weight.size(-1))
            torch.nn.init.uniform_(self.weight.data, -bound, bound)
        elif self.weight_initializer == 'kaiming_uniform':
            inits.kaiming_uniform(self.weight,
                                  fan=self.in_channels,
                                  a=math.sqrt(5))
        elif self.weight_initializer is None:
            inits.kaiming_uniform(self.weight,
                                  fan=self.in_channels,
                                  a=math.sqrt(5))
        else:
            raise RuntimeError(f"Linear layer weight initializer "
                               f"'{self.weight_initializer}' is not supported")

        if self.bias is None or self.in_channels <= 0:
            pass
        elif self.bias_initializer == 'zeros':
            inits.zeros(self.bias)
        elif self.bias_initializer is None:
            inits.uniform(self.in_channels, self.bias)
        else:
            raise RuntimeError(f"Linear layer bias initializer "
                               f"'{self.bias_initializer}' is not supported")
Example 11
 def reset_parameters(self):
     for lin in self.emb_trans:
         glorot(lin.weight)
     glorot(self.user_emb.weight)
     glorot(self.item_emb.weight)
     glorot(self.user_rk_bias.weight)
     glorot(self.item_rk_bias.weight)
Example 12
 def reset_parameters(self):
     for i in range(self.num_node_type):
         glorot(getattr(self, "node_weight_%d" % i))
         # torch.nn.init.uniform(getattr(self, "node_weight_%d" % i))
         zeros(getattr(self, "node_bias_%d" % i))
     for i in range(self.num_edge_type):
         glorot(getattr(self, "edge_weight_%d" % i))
Example 13
    def __init__(self, args):
        super(simpleGCN, self).__init__()
        self.dataset = args.dataset
        self.num_layers = args.num_layers
        self.num_feats = args.num_feats
        self.num_classes = args.num_classes
        self.dim_hidden = args.dim_hidden

        self.dropout = args.dropout
        self.type_norm = args.type_norm
        self.skip_weight = args.skip_weight
        self.num_groups = args.num_groups
        self.norm_weight = None
        self.aggr = 'add'

        self.layers_activation = torch.nn.functional.relu
        self.layers_bn = nn.ModuleList([])
        self.weight = Parameter(torch.Tensor(self.num_feats, self.num_classes))
        glorot(self.weight)

        for i in range(self.num_layers):
            if self.type_norm in ['None', 'batch', 'pair']:
                skip_connect = False
            else:
                skip_connect = True
            self.layers_bn.append(
                batch_norm(self.num_classes, self.type_norm, skip_connect,
                           self.num_groups, self.skip_weight))
Example 14
 def reset_parameters(self):
     glorot(self.weight)
     glorot(self.weight1)
     glorot(self.weight2)
     glorot(self.att)
     glorot(self.att2)
     zeros(self.bias)
     zeros(self.bias2)
Example 15
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_types,
                 num_relations,
                 n_heads,
                 dropout=0.2,
                 use_norm=True,
                 use_RTE=True,
                 **kwargs):
        super(DenseHGTConv, self).__init__(node_dim=0, aggr='add', **kwargs)

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_types = num_types
        self.num_relations = num_relations
        self.total_rel = num_types * num_relations * num_types
        self.n_heads = n_heads
        self.d_k = out_dim // n_heads
        self.sqrt_dk = math.sqrt(self.d_k)
        self.use_norm = use_norm
        self.use_RTE = use_RTE
        self.att = None

        self.k_linears = nn.ModuleList()
        self.q_linears = nn.ModuleList()
        self.v_linears = nn.ModuleList()
        self.a_linears = nn.ModuleList()
        self.norms = nn.ModuleList()

        for t in range(num_types):
            self.k_linears.append(nn.Linear(in_dim, out_dim))
            self.q_linears.append(nn.Linear(in_dim, out_dim))
            self.v_linears.append(nn.Linear(in_dim, out_dim))
            self.a_linears.append(nn.Linear(out_dim, out_dim))
            if use_norm:
                self.norms.append(nn.LayerNorm(out_dim))
        # TODO: make relation_pri smaller, as not all <st, rt, tt> pairs
        # exist in the meta-relation list.
        self.relation_pri = nn.Parameter(
            torch.ones(num_relations, self.n_heads))
        self.relation_att = nn.Parameter(
            torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
        self.relation_msg = nn.Parameter(
            torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
        self.drop = nn.Dropout(dropout)

        if self.use_RTE:
            self.emb = RelTemporalEncoding(in_dim)

        glorot(self.relation_att)
        glorot(self.relation_msg)

        self.mid_linear = nn.Linear(out_dim, out_dim * 2)
        self.out_linear = nn.Linear(out_dim * 2, out_dim)
        self.out_norm = nn.LayerNorm(out_dim)
Example 16
 def reset_parameters(self):
     reset(self.k_lin)
     reset(self.q_lin)
     reset(self.v_lin)
     reset(self.a_lin)
     ones(self.skip)
     ones(self.p_rel)
     glorot(self.a_rel)
     glorot(self.m_rel)
Example 17
 def __init__(self, num_features, num_classes):
     super(MLP, self).__init__()
     self.fc1 = nn.Linear(num_features, 16)
     self.fc2 = nn.Linear(16, num_classes)
     # torch.nn.init.xavier_uniform_(self.fc1.weight)
     # torch.nn.init.xavier_uniform_(self.fc2.weight)
     glorot(self.fc1.weight)
     zeros(self.fc1.bias)
     glorot(self.fc2.weight)
     zeros(self.fc2.bias)
Example 18
 def reset_parameters(self):
     tgi.glorot(self.weight)
     tgi.zeros(self.bias)
     for name, param in self.named_parameters():
         if name.startswith("att_scaling"):
             tgi.ones(param)
         elif name.startswith("att_bias"):
             tgi.zeros(param)
         elif name.startswith("att_mh"):
             tgi.glorot(param)
Example 19
 def __init__(self, in_channels: int, out_channels: int, K: int,
              embedding_dimensions: int):
     super(AVWGCN, self).__init__()
     self.K = K
     self.weights_pool = torch.nn.Parameter(
         torch.Tensor(embedding_dimensions, K, in_channels, out_channels))
     self.bias_pool = torch.nn.Parameter(
         torch.Tensor(embedding_dimensions, out_channels))
     glorot(self.weights_pool)
     zeros(self.bias_pool)
Example 20
 def reset_parameters(self):
     glorot(self.lin_l.weight)
     glorot(self.lin_r.weight)
     # glorot(self.lin_e.weight) # for edge feature
     # glorot(self.att_l)
     # glorot(self.att_r)
     glorot(self.proj_cmd.weight)
     glorot(self.cal_cmd.weight)
     glorot(self.cal_x.weight)
     # glorot(self.att_e) # for edge feature
     zeros(self.bias)
Example 21
 def _initialize_alphas(self):
     num_ops = len(PRIMITIVES)
     self.alphas_normal = []
     self.att = nn.Parameter(torch.Tensor(3, 5))
     for i in range(self._steps):
         for n in range(2 + i):
             # torch.autograd.Variable is deprecated; a leaf tensor with
             # requires_grad_(True) is the modern equivalent.
             self.alphas_normal.append(
                 (1e-3 * torch.randn(num_ops, device='cuda')).requires_grad_(True))
     self._arch_parameters = [self.alphas_normal, self.att]
     inits.glorot(self.att)
Example 22
    def __init__(self, in_dim, hidden_dim, x_num_day):
        super().__init__()
        self.fc_weight = Parameter(torch.Tensor(in_dim, 1, hidden_dim))
        self.fc_bias = Parameter(torch.Tensor(in_dim, hidden_dim))
        glorot(self.fc_weight)
        zeros(self.fc_bias)
        # Use nn.ModuleList (not a plain list) so the per-day convolutions
        # are registered with this module and seen by .parameters() / .to().
        self.conv1s = nn.ModuleList([
            GCNTensorInteraction(in_dim, 1, hidden_dim, interaction_ftr_dim=3).cuda()
            for _ in range(x_num_day)])
        self.conv2s = nn.ModuleList([
            GCNTensorInteraction(in_dim, hidden_dim, hidden_dim, interaction_ftr_dim=3).cuda()
            for _ in range(x_num_day)])

        # add more if necessary
        self.rnn = IMVTensorLSTM(in_dim, 1, hidden_dim, hidden_dim * 2).cuda()
        self.x_num_day = x_num_day
Example 23
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_types,
                 num_relations,
                 n_heads,
                 dropout=0.2,
                 use_norm=True,
                 use_RTE=False,
                 **kwargs):
        super(HGTConv, self).__init__(aggr='add', **kwargs)

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_types = num_types
        self.num_relations = num_relations
        self.total_rel = num_types * num_relations * num_types
        self.n_heads = n_heads
        self.d_k = out_dim // n_heads
        self.sqrt_dk = math.sqrt(self.d_k)
        self.use_norm = use_norm
        self.use_RTE = use_RTE
        self.att = None

        self.k_linears = nn.ModuleList()
        self.q_linears = nn.ModuleList()
        self.v_linears = nn.ModuleList()
        self.a_linears = nn.ModuleList()
        self.norms = nn.ModuleList()

        for t in range(num_types):
            self.k_linears.append(nn.Linear(in_dim, out_dim))
            self.q_linears.append(nn.Linear(in_dim, out_dim))
            self.v_linears.append(nn.Linear(in_dim, out_dim))
            self.a_linears.append(nn.Linear(out_dim, out_dim))
            if use_norm:
                self.norms.append(nn.LayerNorm(out_dim))

        self.relation_pri = nn.Parameter(
            torch.ones(self.num_relations, self.n_heads))
        self.relation_att = nn.Parameter(
            torch.Tensor(self.num_relations, self.n_heads, self.d_k, self.d_k))
        self.relation_msg = nn.Parameter(
            torch.Tensor(self.num_relations, self.n_heads, self.d_k, self.d_k))
        self.skip = nn.Parameter(torch.ones(num_types))
        self.dropout = nn.Dropout(dropout)

        if self.use_RTE:
            pass  # TODO: add temporal-information modeling (relative temporal encoding)

        glorot(self.relation_att)
        glorot(self.relation_msg)
Example 24
    def reset_parameters(self):
        for lin in self.rel_lins:
            lin.reset_parameters()
        for lin in self.root_lins:
            lin.reset_parameters()
        if self.args.Norm4:
            for n in self.msg_norm:
                n.reset_parameters()
            for n in self.layer_norm:
                n.reset_parameters()

        glorot(self.intra_attn_l)
        glorot(self.intra_attn_r)
Example 25
 def reset_parameters(self):
     self.lin_key.reset_parameters()
     self.lin_query.reset_parameters()
     self.lin_value.reset_parameters()
     self.lin_key_r.reset_parameters()
     self.lin_query_r.reset_parameters()
     self.lin_value_r.reset_parameters()
     if self.edge_dim:
         self.lin_edge.reset_parameters()
     self.lin_skip.reset_parameters()
     self.lin_skip_node.reset_parameters()
     if self.beta:
         self.lin_beta.reset_parameters()
     glorot(self.w_relation)
Example 26
    def __init__(self, in_features, out_features, dropout, concat=True):
        super(AltGraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.concat = concat

        # W_in_features = in_features
        
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
        glorot(self.W)
        self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
        glorot(self.a)

        self.act = nn.LeakyReLU()
Example 27
def test_inits():
    x = torch.empty(1, 4)

    uniform(size=4, tensor=x)
    assert x.min() >= -0.5
    assert x.max() <= 0.5

    glorot(x)
    assert x.min() >= -1.25
    assert x.max() <= 1.25

    zeros(x)
    assert x.tolist() == [[0, 0, 0, 0]]

    ones(x)
    assert x.tolist() == [[1, 1, 1, 1]]
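
The asserted bounds follow from the initializer definitions: uniform(size, tensor) draws from [-1/sqrt(size), 1/sqrt(size)], which is 0.5 for size=4, and glorot draws from the Xavier-uniform range ±sqrt(6 / (fan_in + fan_out)), about 1.095 for a 1×4 tensor and so safely inside ±1.25. A minimal sketch consistent with the test above (not necessarily the library's exact source):

    import math
    import torch

    def uniform(size, tensor):
        # Uniform in [-1/sqrt(size), 1/sqrt(size)].
        bound = 1.0 / math.sqrt(size)
        if tensor is not None:
            tensor.data.uniform_(-bound, bound)

    def glorot(tensor):
        # Xavier-uniform: bound = sqrt(6 / (fan_in + fan_out)),
        # with the fans taken from the last two dimensions.
        if tensor is not None:
            bound = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
            tensor.data.uniform_(-bound, bound)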
Example 28
 def reset_parameters(self):
     self.lin_pos.reset_parameters()
     self.lin_neg.reset_parameters()
     glorot(self.lin_pos_agg.weight)
     glorot(self.lin_neg_agg.weight)
     glorot(self.att_i)
     glorot(self.att_j)
     init.xavier_uniform_(self.weight, gain=math.sqrt(2))
     init.constant_(self.bias, 0)
Example 29
 def _set_parameters(self):
     glorot(self.W_i)
     glorot(self.W_f)
     glorot(self.W_c)
     glorot(self.W_o)
     zeros(self.b_i)
     zeros(self.b_f)
     zeros(self.b_c)
     zeros(self.b_o)
Example 30
 def reset_parameters(self):
     if not self.if_use_features:
         glorot(self.x)
     for module in self.pea_channels:
         module.reset_parameters()
     glorot(self.fc1.weight)
     glorot(self.fc2.weight)
     if self.channel_aggr == 'att':
         glorot(self.att)
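
Across these examples the recurring pattern is the same: weight-like tensors are declared as Parameter objects, glorot (Xavier-uniform) is applied to matrices and attention vectors, zeros to biases, and any cached state is cleared. A self-contained sketch of that pattern with a hypothetical layer, assuming torch_geometric.nn.inits provides glorot and zeros as in the snippets above:

    import torch
    from torch.nn import Parameter
    from torch_geometric.nn.inits import glorot, zeros

    class MyConv(torch.nn.Module):
        # Hypothetical layer illustrating the shared init pattern.
        def __init__(self, in_channels, out_channels):
            super().__init__()
            self.weight = Parameter(torch.empty(in_channels, out_channels))
            self.bias = Parameter(torch.empty(out_channels))
            self.reset_parameters()

        def reset_parameters(self):
            glorot(self.weight)  # Xavier-uniform for the weight matrix
            zeros(self.bias)     # bias starts at zero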