Example #1
import torch.nn as nn

# GraphConvLayer, GraphPool, and GraphGather are project-specific modules
# assumed to be importable from the surrounding package.


def create_graph_conv_layers(gconv_args):
    layers = []
    for gc_arg in gconv_args:
        gconv = GraphConvLayer(*gc_arg.args)
        layers.append(gconv)

        # Batch normalization; gc_arg[1] is assumed to be the conv layer's
        # output dimension.
        if gc_arg.use_batch_norm:
            bn = nn.BatchNorm1d(gc_arg[1])
            layers.append(bn)

        # Dropout; a negative gc_arg.dropout disables dropout for this layer
        if gc_arg.dropout > -1:
            dr = nn.Dropout(gc_arg.dropout)
            layers.append(dr)

        # Pooling
        if gc_arg.graph_pool:
            p = GraphPool(gc_arg[2], gc_arg[3])
            layers.append(p)

        # Dense layer & normalization & dropout
        layers.append(nn.Linear(gc_arg[1], gc_arg.dense_layer_size))
        layers.append(nn.BatchNorm1d(gc_arg.dense_layer_size))
        if gc_arg.dropout > -1:
            layers.append(nn.Dropout(gc_arg.dropout))

        # Gather
        layers.append(GraphGather())
    return layers
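
For reference, a minimal usage sketch. All names below are hypothetical: gconv_args is expected to be an iterable of objects exposing both attribute access (args, use_batch_norm, dropout, graph_pool, dense_layer_size) and index access (gc_arg[1] for the conv output dim, gc_arg[2] and gc_arg[3] for the pooling arguments); the exact positional layout of args depends on the signatures of GraphConvLayer and GraphPool in this project. A stand-in illustrating that interface:

class GConvArgStub:
    # Hypothetical stand-in; the real args object in this project may differ.
    def __init__(self, args, use_batch_norm=True, dropout=0.1,
                 graph_pool=True, dense_layer_size=128):
        self.args = args  # positional args forwarded to GraphConvLayer(*args)
        self.use_batch_norm = use_batch_norm
        self.dropout = dropout  # negative value disables dropout
        self.graph_pool = graph_pool
        self.dense_layer_size = dense_layer_size

    def __getitem__(self, i):
        # Index access delegates to the positional args, so args[1] must be
        # the conv output dim and args[2], args[3] the GraphPool arguments.
        return self.args[i]


layers = create_graph_conv_layers([GConvArgStub(args=(75, 64, 2, 2))])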
Example #2
import torch.nn as nn


def create_gconv_net(hparams):
    dim = hparams["gconv"]["dim"]
    gconv_model = GraphConvSequential(
        GraphConvLayer(in_dim=75, out_dim=64), nn.BatchNorm1d(64), nn.ReLU(),
        GraphPool(), GraphConvLayer(in_dim=64, out_dim=64), nn.BatchNorm1d(64),
        nn.ReLU(), GraphPool(), nn.Linear(in_features=64, out_features=dim),
        nn.BatchNorm1d(dim), nn.ReLU(), nn.Dropout(hparams["dprob"]),
        GraphGather(activation='tanh'))
    # GraphGather doubles the feature dimension, hence dim * 2 below.
    return nn.Sequential(gconv_model,
                         nn.Linear(dim * 2, hparams["latent_dim"]),
                         nn.BatchNorm1d(hparams["latent_dim"]),
                         nn.ReLU())
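
A minimal usage sketch with hypothetical hyperparameter values; the keys mirror those read inside create_gconv_net:

hparams = {
    "gconv": {"dim": 128},   # width of the dense layer before GraphGather
    "dprob": 0.2,            # dropout probability
    "latent_dim": 256,       # width of the final latent representation
}
model = create_gconv_net(hparams)
print(model)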
Example #3
import torch.nn as nn


def create_gconv_net(hparams):
    gconv_model = GraphConvSequential(GraphConvLayer(in_dim=75, out_dim=64),
                                      nn.BatchNorm1d(64),
                                      nn.ReLU(),
                                      GraphPool(),

                                      GraphConvLayer(in_dim=64, out_dim=64),
                                      nn.BatchNorm1d(64),
                                      nn.ReLU(),
                                      GraphPool(),

                                      nn.Linear(in_features=64, out_features=hparams["gconv"]["dim"]),
                                      nn.BatchNorm1d(hparams["gconv"]["dim"]),
                                      nn.ReLU(),
                                      nn.Dropout(hparams["dprob"]),
                                      GraphGather())
    # GraphGather doubles the graph-conv feature dimension, hence dim * 2.
    civ_dim = (hparams["gconv"]["dim"] * 2, hparams["prot"]["dim"])
    return gconv_model, civ_dim
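
A minimal usage sketch with hypothetical hyperparameter values; the keys mirror those read above:

hparams = {
    "gconv": {"dim": 128},
    "prot": {"dim": 8421},
    "dprob": 0.2,
}
gconv_model, civ_dim = create_gconv_net(hparams)
print(civ_dim)  # (256, 8421)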
Example #4
    def test_graph_conv_layer(self):
        feat_graph_conv = jova.feat.ConvMolFeaturizer()
        mols = feat_graph_conv(self.mols)
        self.assertEqual(len(mols), len(self.mols))
        input_data = process_graph_conv_input(mols)
        num_atom_feat = input_data[0][-1].shape[0]
        gconv1 = GraphConvLayer(in_dim=num_atom_feat, out_dim=64)

        # conv 1
        out1 = gconv1(input_data)
        self.assertIsNotNone(out1, msg="graph convolution 1 returned None")
        out1 = torch.nn.BatchNorm1d(64)(out1)
        print('\ngraph conv shape=', out1.shape)

        # conv pool
        gconvp1 = GraphPool()
        input_data[0] = out1
        out1 = gconvp1(input_data)
        self.assertIsNotNone(out1, msg="graph pool 1 returned None")
        print('conv pool shape=', out1.shape)

        # conv gather
        gconv_gather = GraphGather()
        input_data[0] = out1
        out_gathered = gconv_gather(input_data, len(mols))
        self.assertIsNotNone(out_gathered, "Nothing was gathered")
        print('out_gathered shape=', out_gathered.shape)

        params = list(gconv1.parameters())
        print('\n# of parameter tensors =', len(params))
        for p in params:
            print(p.size())

        # backprop test
        idx = 1  # np.random.randint(10)
        self.assertIsNone(params[idx].grad,
                          "Gradient should be None before backward()")
        c = 2
        o = c * torch.sum(out_gathered)
        o.backward()
        self.assertIsNotNone(params[idx].grad, "Backprop failed")

        for i in range(len(params)):
            print('parameter-{} (shape={}) gradient = {}'.format(
                i + 1, params[i].shape, params[i].grad))
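
The same backprop smoke test distilled to plain PyTorch, runnable without the project-specific graph layers (nn.Linear stands in for GraphConvLayer):

import torch
import torch.nn as nn

layer = nn.Linear(75, 64)
x = torch.randn(10, 75)

params = list(layer.parameters())
assert params[0].grad is None      # no gradient before backward()

out = layer(x)
loss = 2 * torch.sum(out)          # same scalar reduction as in the test
loss.backward()

assert params[0].grad is not None  # gradients populated by backprop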
Example #5
import torch.nn as nn

# GraphConvSequential, PairSequential, FcnArgs, and create_fcn_layers are
# project-specific helpers assumed to be importable from the surrounding package.


def create_integrated_net(hparams):
    # segment 1 - graphconv
    gconv_model = GraphConvSequential(
        GraphConvLayer(in_dim=75, out_dim=64), nn.BatchNorm1d(64), nn.ReLU(),
        GraphPool(), GraphConvLayer(in_dim=64, out_dim=64), nn.BatchNorm1d(64),
        nn.ReLU(), GraphPool(),
        nn.Linear(in_features=64, out_features=hparams["gconv_dim"]),
        nn.BatchNorm1d(hparams["gconv_dim"]), nn.ReLU(),
        nn.Dropout(hparams["dprob"]), GraphGather())

    # segment 2 - fingerprint
    fp_net = nn.Identity()

    # segment 3 - protein
    prot_net = nn.Identity()

    civ_net = PairSequential(
        (PairSequential(mod1=(gconv_model, ), mod2=(fp_net, )), ),
        (prot_net, ))

    civ_dim = hparams["prot_dim"] + hparams["gconv_dim"] * 2 + hparams["fp_dim"]
    fcn_args = []
    p = civ_dim
    layers = hparams["hdims"]
    if not isinstance(layers, list):
        layers = [layers]
    for dim in layers:
        conf = FcnArgs(in_features=p,
                       out_features=dim,
                       activation='relu',
                       batch_norm=True,
                       dropout=hparams["dprob"])
        fcn_args.append(conf)
        p = dim
    fcn_args.append(FcnArgs(in_features=p, out_features=hparams['output_dim']))
    fcn_layers = create_fcn_layers(fcn_args)
    model = nn.Sequential(civ_net, *fcn_layers)
    return model
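
A minimal usage sketch with hypothetical hyperparameter values; the keys mirror those read inside create_integrated_net:

hparams = {
    "gconv_dim": 128,
    "fp_dim": 1024,
    "prot_dim": 8421,
    "hdims": [2048, 512],
    "dprob": 0.2,
    "output_dim": 1,
}
model = create_integrated_net(hparams)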