Example #1
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_norm,
                 layer_norm,
                 prior_sigma_1=0.1,
                 prior_sigma_2=0.001,
                 prior_pi=1.):

        super().__init__()
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.batch_norm = batch_norm
        self.layer_norm = layer_norm
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        # Multi-layer model
        self.linear_1 = BayesianLinear(input_dim, hidden_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)
        if self.batch_norm:
            self.bn = nn.BatchNorm1d(hidden_dim)
        if self.layer_norm:
            self.ln = nn.LayerNorm(hidden_dim)
        self.linear_2 = BayesianLinear(hidden_dim, output_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)
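Every example on this page assumes a BayesianLinear layer in the Bayes-by-Backprop style (Blundell et al., 2015), where prior_sigma_1, prior_sigma_2, and prior_pi parameterize a two-component Gaussian scale-mixture prior over the weights. A minimal sketch of such a layer, with the KL/ELBO bookkeeping omitted for brevity (illustrative only; the implementation actually behind these examples may differ):

import torch
import torch.nn as nn
import torch.nn.functional as F

class BayesianLinear(nn.Module):
    def __init__(self, in_features, out_features, bias=True,
                 prior_sigma_1=0.1, prior_sigma_2=0.001, prior_pi=1.):
        super().__init__()
        # Scale-mixture prior hyperparameters (consumed by the omitted KL term).
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi
        # Variational posterior parameters: one (mu, rho) pair per weight.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features).normal_(0., 0.1))
        self.weight_rho = nn.Parameter(torch.full((out_features, in_features), -5.))
        self.use_bias = bias
        if bias:
            self.bias_mu = nn.Parameter(torch.zeros(out_features))
            self.bias_rho = nn.Parameter(torch.full((out_features,), -5.))

    def forward(self, x):
        # Reparameterization trick: w = mu + softplus(rho) * eps with eps ~ N(0, 1),
        # so every forward pass draws a fresh weight sample.
        weight = self.weight_mu + F.softplus(self.weight_rho) * torch.randn_like(self.weight_mu)
        bias = None
        if self.use_bias:
            bias = self.bias_mu + F.softplus(self.bias_rho) * torch.randn_like(self.bias_mu)
        return F.linear(x, weight, bias)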
Example #2
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        self.readout = net_params['readout']  # this is graph_pooling_type
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']
        else:
            self.num_classes = 1

        # Scale-mixture prior hyperparameters
        self.prior_sigma_1 = net_params['bbp_prior_sigma_1']
        self.prior_sigma_2 = net_params['bbp_prior_sigma_2']
        self.prior_pi = net_params['bbp_prior_pi']

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        self.embedding_lin = BayesianLinear(num_atom_type, hidden_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)

        for layer in range(self.n_layers):
            mlp = MLP(hidden_dim, hidden_dim, hidden_dim, self.batch_norm, self.layer_norm,
                    self.prior_sigma_1, self.prior_sigma_2, self.prior_pi)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         self.graph_norm, self.batch_norm, self.layer_norm, 0,
                         learn_eps))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score

        self.linear_ro = BayesianLinear(hidden_dim, out_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)
        self.linear_prediction = BayesianLinear(out_dim, self.num_classes, bias=True,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)

        # additional parameters for gated residual connection
        if self.residual == 'gated':
            self.W_g = BayesianLinear(2 * hidden_dim, hidden_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)
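A hypothetical instantiation of the network above. The class name BayesGINNet and the concrete values are illustrative; only the keys come from the snippet:

net_params = {
    'num_atom_type': 28, 'hidden_dim': 64, 'out_dim': 64, 'dropout': 0.1,
    'L': 4, 'n_mlp_GIN': 2, 'learn_eps_GIN': True, 'neighbor_aggr_GIN': 'sum',
    'readout': 'mean', 'graph_norm': False, 'batch_norm': True,
    'layer_norm': False, 'residual': 'gated', 'task': 'classification',
    'num_classes': 2, 'bbp_prior_sigma_1': 0.1, 'bbp_prior_sigma_2': 0.001,
    'bbp_prior_pi': 1.,
}
model = BayesGINNet(net_params)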
Example #3
    def __init__(self,
                 in_feats,
                 out_feats,
                 activation,
                 dropout,
                 concat_norm,
                 bias=False,
                 prior_sigma_1=0.1,
                 prior_sigma_2=0.001,
                 prior_pi=1.):
        super().__init__()

        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        self.dropout = nn.Dropout(p=dropout)
        self.linear = BayesianLinear(in_feats * 2,
                                     out_feats,
                                     bias,
                                     prior_sigma_1=self.prior_sigma_1,
                                     prior_sigma_2=self.prior_sigma_2,
                                     prior_pi=self.prior_pi)
        self.activation = activation
        self.concat_norm = concat_norm
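The in_feats * 2 input width suggests a GraphSAGE-style update that concatenates a node's own features with its aggregated neighbor features. A hypothetical forward for this layer under that assumption (not the source's code):

    def forward(self, h_self, h_neigh):
        # Concatenate self and aggregated-neighbor features: (N, in_feats * 2).
        h = torch.cat([h_self, h_neigh], dim=1)
        h = self.linear(self.dropout(h))
        if self.activation is not None:
            h = self.activation(h)
        if self.concat_norm:
            h = F.normalize(h, p=2, dim=1)  # optional L2 normalization per node
        return h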
Example #4
    def __init__(self, in_dim, out_dim,
                 prior_sigma_1=0.1, prior_sigma_2=0.001, prior_pi=1.):
        super().__init__()

        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        self.linear = BayesianLinear(in_dim, out_dim, bias=False,
                prior_sigma_1=self.prior_sigma_1, prior_sigma_2=self.prior_sigma_2, prior_pi=self.prior_pi)
Example #5
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 dropout,
                 graph_norm,
                 batch_norm,
                 layer_norm,
                 att_reduce_fn="softmax",
                 prior_sigma_1=0.1,
                 prior_sigma_2=0.001,
                 prior_pi=1.):
        super().__init__()
        out_dim = out_dim // num_heads
        self.dropout = dropout
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.layer_norm = layer_norm

        self.fc = BayesianLinear(in_dim,
                                 out_dim,
                                 bias=False,
                                 prior_sigma_1=prior_sigma_1,
                                 prior_sigma_2=prior_sigma_2,
                                 prior_pi=prior_pi)
        self.attn_fc = BayesianLinear(2 * out_dim,
                                      1,
                                      bias=False,
                                      prior_sigma_1=prior_sigma_1,
                                      prior_sigma_2=prior_sigma_2,
                                      prior_pi=prior_pi)
        if batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim)
        if layer_norm:
            self.layernorm_h = nn.LayerNorm(out_dim)

        self.att_reduce_fn = att_reduce_fn
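A hypothetical forward for this attention head, assuming a DGL graph and the default att_reduce_fn == "softmax". dgl.function and dgl.nn.functional.edge_softmax are real DGL APIs, but the wiring below is an illustrative sketch rather than the source's code:

    # requires: import dgl.function as fn
    #           from dgl.nn.functional import edge_softmax
    def forward(self, g, h):
        g.ndata['z'] = self.fc(h)  # project nodes to the per-head dimension

        def edge_attention(edges):
            # Attention logit from the concatenated endpoint features.
            z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
            return {'e': F.leaky_relu(self.attn_fc(z2))}

        g.apply_edges(edge_attention)
        g.edata['a'] = edge_softmax(g, g.edata['e'])  # normalize per destination node
        g.update_all(fn.u_mul_e('z', 'a', 'm'), fn.sum('m', 'h'))
        return g.ndata['h']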
Example #6
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 dropout,
                 graph_norm,
                 batch_norm,
                 layer_norm,
                 residual=False,
                 att_reduce_fn="softmax",
                 prior_sigma_1=0.1,
                 prior_sigma_2=0.001,
                 prior_pi=1.):
        super().__init__()
        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.residual = residual
        self.att_reduce_fn = att_reduce_fn

        self.heads = nn.ModuleList()
        for _ in range(num_heads):
            self.heads.append(
                GATHeadLayer(in_dim,
                             out_dim,
                             num_heads,
                             dropout,
                             graph_norm,
                             batch_norm,
                             layer_norm,
                             att_reduce_fn,
                             prior_sigma_1=prior_sigma_1,
                             prior_sigma_2=prior_sigma_2,
                             prior_pi=prior_pi))
        self.linear_concat = BayesianLinear(out_dim,
                                            out_dim,
                                            bias=False,
                                            prior_sigma_1=prior_sigma_1,
                                            prior_sigma_2=prior_sigma_2,
                                            prior_pi=prior_pi)
        self.merge = 'cat'
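A hypothetical forward combining the heads above (merge == 'cat'): each head emits out_dim // num_heads features, the concatenation restores out_dim, and linear_concat mixes the heads:

    def forward(self, g, h):
        head_outs = [head(g, h) for head in self.heads]  # each (N, out_dim // num_heads)
        h = torch.cat(head_outs, dim=1)                  # (N, out_dim)
        return self.linear_concat(h)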
Example #7
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']

        self.att_reduce_fn = net_params['att_reduce_fn']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']

        self.prior_sigma_1 = net_params['bbp_prior_sigma_1']
        self.prior_sigma_2 = net_params['bbp_prior_sigma_2']
        self.prior_pi = net_params['bbp_prior_pi']

        self.dropout = dropout

        self.embedding_lin = BayesianLinear(num_atom_type,
                                            hidden_dim,
                                            bias=False,
                                            prior_sigma_1=self.prior_sigma_1,
                                            prior_sigma_2=self.prior_sigma_2,
                                            prior_pi=self.prior_pi)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim,
                     hidden_dim,
                     num_heads,
                     dropout,
                     self.graph_norm,
                     self.batch_norm,
                     self.layer_norm,
                     self.att_reduce_fn,
                     prior_sigma_1=self.prior_sigma_1,
                     prior_sigma_2=self.prior_sigma_2,
                     prior_pi=self.prior_pi) for _ in range(n_layers)
        ])

        self.linear_ro = BayesianLinear(hidden_dim,
                                        out_dim,
                                        bias=False,
                                        prior_sigma_1=self.prior_sigma_1,
                                        prior_sigma_2=self.prior_sigma_2,
                                        prior_pi=self.prior_pi)
        self.linear_predict = BayesianLinear(out_dim,
                                             1,
                                             bias=True,
                                             prior_sigma_1=self.prior_sigma_1,
                                             prior_sigma_2=self.prior_sigma_2,
                                             prior_pi=self.prior_pi)

        # additional parameters for gated residual connection
        if self.residual == "gated":
            self.W_g = BayesianLinear(2 * hidden_dim,
                                      hidden_dim,
                                      bias=False,
                                      prior_sigma_1=self.prior_sigma_1,
                                      prior_sigma_2=self.prior_sigma_2,
                                      prior_pi=self.prior_pi)
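A hypothetical forward skeleton for the network above, assuming DGL batched graphs. dgl.mean_nodes is the real DGL call for a 'mean' readout; the rest is an illustrative wiring of the modules defined in __init__, not the source's code:

    # requires: import dgl
    def forward(self, g, h):
        h = self.in_feat_dropout(self.embedding_lin(h))
        for conv in self.layers:
            h = conv(g, h)                  # stacked Bayesian GAT layers
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')         # graph-level 'mean' readout
        return self.linear_predict(F.relu(self.linear_ro(hg)))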
Example #8
    def __init__(self,
                 input_dim,
                 output_dim,
                 dropout,
                 graph_norm,
                 batch_norm,
                 layer_norm,
                 gated_gcn_agg,
                 prior_sigma_1=0.1,
                 prior_sigma_2=0.001,
                 prior_pi=1.):  #, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.layer_norm = layer_norm
        self.gated_gcn_agg = gated_gcn_agg

        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        self.A = BayesianLinear(input_dim,
                                output_dim,
                                bias=False,
                                prior_sigma_1=self.prior_sigma_1,
                                prior_sigma_2=self.prior_sigma_2,
                                prior_pi=self.prior_pi)
        self.B = BayesianLinear(input_dim,
                                output_dim,
                                bias=False,
                                prior_sigma_1=self.prior_sigma_1,
                                prior_sigma_2=self.prior_sigma_2,
                                prior_pi=self.prior_pi)
        self.C = BayesianLinear(input_dim,
                                output_dim,
                                bias=False,
                                prior_sigma_1=self.prior_sigma_1,
                                prior_sigma_2=self.prior_sigma_2,
                                prior_pi=self.prior_pi)
        self.D = BayesianLinear(input_dim,
                                output_dim,
                                bias=False,
                                prior_sigma_1=self.prior_sigma_1,
                                prior_sigma_2=self.prior_sigma_2,
                                prior_pi=self.prior_pi)
        self.E = BayesianLinear(input_dim,
                                output_dim,
                                bias=False,
                                prior_sigma_1=self.prior_sigma_1,
                                prior_sigma_2=self.prior_sigma_2,
                                prior_pi=self.prior_pi)

        if batch_norm:
            self.bn_node_h = nn.BatchNorm1d(output_dim)
            self.bn_node_e = nn.BatchNorm1d(output_dim)
        if layer_norm:
            self.ln_node_h = nn.LayerNorm(output_dim)
            self.ln_node_e = nn.LayerNorm(output_dim)
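The five projections A through E match the GatedGCN formulation of Bresson & Laurent, where edges update as e_ij' = C e_ij + D h_i + E h_j and nodes as h_i' = A h_i plus a sigmoid-gated mean of B h_j over neighbors. A hypothetical dense-index sketch of that update (src/dst are per-edge endpoint indices; not the source's code):

    def gated_gcn_update(self, h, e, src, dst):
        # h: (N, D) node features, e: (M, D) edge features, src/dst: (M,) indices.
        Ah, Bh, Dh, Eh = self.A(h), self.B(h), self.D(h), self.E(h)
        e_new = self.C(e) + Dh[src] + Eh[dst]               # edge update
        gates = torch.sigmoid(e_new)                        # per-edge gates
        num = torch.zeros_like(Ah).index_add_(0, dst, gates * Bh[src])
        den = torch.zeros_like(Ah).index_add_(0, dst, gates) + 1e-6
        return Ah + num / den, e_new                        # gated node update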
Example #9
    def __init__(self, input_dim, output_dim, bias=False, L=2):  # L = number of hidden layers
        super().__init__()
        list_FC_layers = [BayesianLinear(input_dim, input_dim, bias=bias) for _ in range(L)]
        list_FC_layers.append(BayesianLinear(input_dim, output_dim, bias=bias))
        self.FC_layers = nn.ModuleList(list_FC_layers)
        self.L = L
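A hypothetical forward for the readout above: L hidden layers with ReLU, then a final un-activated projection to output_dim:

    def forward(self, x):
        y = x
        for l in range(self.L):
            y = F.relu(self.FC_layers[l](y))
        return self.FC_layers[self.L](y)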
Example #10
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.readout = net_params['readout']
        self.residual = net_params['residual']
        self.concat_norm = net_params['concat_norm']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']
        else:
            self.num_classes = 1

        self.prior_sigma_1 = net_params['bbp_prior_sigma_1']
        self.prior_sigma_2 = net_params['bbp_prior_sigma_2']
        self.prior_pi = net_params['bbp_prior_pi']

        self.embedding_lin = BayesianLinear(num_atom_type,
                                            hidden_dim,
                                            bias=False,
                                            prior_sigma_1=self.prior_sigma_1,
                                            prior_sigma_2=self.prior_sigma_2,
                                            prior_pi=self.prior_pi)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim,
                           hidden_dim,
                           F.relu,
                           dropout,
                           aggregator_type,
                           self.batch_norm,
                           self.graph_norm,
                           self.layer_norm,
                           self.concat_norm,
                           prior_sigma_1=self.prior_sigma_1,
                           prior_sigma_2=self.prior_sigma_2,
                           prior_pi=self.prior_pi) for _ in range(n_layers)
        ])
        self.linear_ro = BayesianLinear(hidden_dim,
                                        out_dim,
                                        bias=False,
                                        prior_sigma_1=self.prior_sigma_1,
                                        prior_sigma_2=self.prior_sigma_2,
                                        prior_pi=self.prior_pi)
        self.linear_predict = BayesianLinear(out_dim,
                                             self.num_classes,
                                             bias=True,
                                             prior_sigma_1=self.prior_sigma_1,
                                             prior_sigma_2=self.prior_sigma_2,
                                             prior_pi=self.prior_pi)

        # additional parameters for gated residual connection
        if self.residual == "gated":
            self.W_g = BayesianLinear(2 * hidden_dim,
                                      hidden_dim,
                                      bias=False,
                                      prior_sigma_1=self.prior_sigma_1,
                                      prior_sigma_2=self.prior_sigma_2,
                                      prior_pi=self.prior_pi)
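A hypothetical gated residual combine for the residual == "gated" branch above: a learned sigmoid gate z mixes the layer input with the layer output (illustrative; the source's exact mixing rule may differ):

    def gated_residual(self, h_in, h_out):
        z = torch.sigmoid(self.W_g(torch.cat([h_in, h_out], dim=1)))  # (N, hidden_dim)
        return z * h_in + (1. - z) * h_out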
Example #11
    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = BayesianLinear(in_feats, out_feats, bias=bias)
        self.activation = activation
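A hypothetical node-apply step for this module, in the style of DGL's user-defined node functions (node.data holds the aggregated features; not the source's code):

    def forward(self, node):
        h = self.linear(node.data['h'])
        if self.activation is not None:
            h = self.activation(h)
        return {'h': h}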