Example #1
    def create_ffn(self, args: Namespace):
        """
        Creates the feed-forward network for the model.
        :param args: Arguments.
        """
        if args.features_only:
            first_linear_dim = args.features_size
        else:
            first_linear_dim = args.hidden_size
            if args.use_input_features:
                first_linear_dim += args.features_dim
        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)

        if args.ffn_num_layers == 1:
            ffn = [dropout, nn.Linear(first_linear_dim, args.num_tasks)]
        else:
            ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]
            for _ in range(args.ffn_num_layers - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, args.num_tasks),
            ])

        if args.dataset_type == 'classification':
            ffn.append(self.sigmoid)

        self.ffn = nn.Sequential(*ffn)
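For reference, a minimal standalone sketch of the stack this builds when args.ffn_num_layers == 3; the sizes (300 hidden, 1 task), the 0.1 dropout, and the ReLU activation are assumed values, not taken from the snippet:

import torch
import torch.nn as nn

ffn = nn.Sequential(
    nn.Dropout(0.1),
    nn.Linear(300, 300),   # first_linear_dim -> ffn_hidden_size
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(300, 300),   # one hidden block per extra layer (ffn_num_layers - 2)
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(300, 1),     # ffn_hidden_size -> num_tasks
)
out = ffn(torch.randn(8, 300))   # (batch, first_linear_dim) -> (batch, num_tasks)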
Example #2
    def __init__(self, args: Namespace, atom_fdim: int, bond_fdim: int):
        """Initializes the MPNEncoder.

        :param args: Arguments.
        :param atom_fdim: Atom features dimension.
        :param bond_fdim: Bond features dimension.
        """
        super(PairMPNEncoder, self).__init__()
        self.atom_fdim = atom_fdim
        self.bond_fdim = bond_fdim
        self.hidden_size = args.hidden_size
        self.bias = args.bias
        self.depth = args.depth
        self.dropout = args.dropout
        self.layers_per_message = 1
        self.undirected = args.undirected
        self.atom_messages = args.atom_messages
        self.features_only = args.features_only
        self.use_input_features = args.use_input_features
        self.args = args

        if self.features_only:
            return

        # Dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        # Activation
        self.act_func = get_activation_function(args.activation)

        # Cached zeros
        self.cached_zero_vector = nn.Parameter(torch.zeros(self.hidden_size),
                                               requires_grad=False)

        # Input
        input_dim = self.atom_fdim if self.atom_messages else self.bond_fdim
        self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias)

        if self.atom_messages:
            w_h_input_size = self.hidden_size + self.bond_fdim
        else:
            w_h_input_size = self.hidden_size

        # Shared weight matrix across depths (default)
        self.W_h = nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias)

        self.W_o = nn.Linear(self.atom_fdim + self.hidden_size,
                             self.hidden_size)

        # Alignment
        if self.args.align:
            self.align = nn.ModuleList(
                [Alignment(args) for _ in range(self.depth - 1)])
            self.mix = nn.ModuleList([
                Mixture(self.hidden_size, self.hidden_size)
                for _ in range(self.depth - 1)
            ])
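A quick shape sketch of the bond-message path configured above (atom_messages False); the dimensions 133/147/300 are assumptions, and ReLU stands in for the configured activation:

import torch
import torch.nn as nn

atom_fdim, bond_fdim, hidden_size = 133, 147, 300
W_i = nn.Linear(bond_fdim, hidden_size)                 # input features -> initial messages
W_h = nn.Linear(hidden_size, hidden_size)               # shared update matrix across depths
W_o = nn.Linear(atom_fdim + hidden_size, hidden_size)   # readout to atom hidden states

messages = torch.relu(W_i(torch.randn(50, bond_fdim)))  # 50 directed bonds
messages = torch.relu(W_h(messages))                    # one depth of message updates
atom_hidden = torch.relu(W_o(torch.cat(
    [torch.randn(20, atom_fdim), torch.randn(20, hidden_size)], dim=1)))  # 20 atoms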
Example #3
    def create_ffn(self, args: Namespace):
        """
        Creates the feed-forward network for the model.

        :param args: Arguments.
        """
        self.multiclass = args.dataset_type == 'multiclass'
        if self.multiclass:
            self.num_classes = args.multiclass_num_classes
        self.multilabel = args.dataset_type == 'multilabel'
        if self.multilabel:
            self.num_labels = args.num_labels
        if args.features_only:
            first_linear_dim = args.features_size
        else:
            first_linear_dim = args.hidden_size
            if args.use_input_features:
                first_linear_dim += args.features_dim
        if args.smiles_based and args.pooling == 'lstm':
            first_linear_dim = 2 * first_linear_dim

        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)
        fusioner = FEATURES_FUSIONER_REGISTRY[args.fusioner]

        # Create FFN layers
        if args.ffn_num_layers == 1:
            self.fusion_ffn = fusioner(first_linear_dim, args.output_size)
            self.ffn = None
        else:
            self.fusion_ffn = fusioner(first_linear_dim, args.ffn_hidden_size)
            ffn = []
            for _ in range(args.ffn_num_layers - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, args.output_size),
            ])
            # Create FFN model
            self.ffn = nn.Sequential(*ffn)
        self.dropout = dropout
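How fusion_ffn and ffn are chained is left to forward(), which is not part of this excerpt; a hypothetical sketch with the fusioner stubbed as a plain Linear (all sizes assumed):

import torch
import torch.nn as nn

fusion_ffn = nn.Linear(300, 300)   # stand-in for fusioner(first_linear_dim, ffn_hidden_size)
ffn = nn.Sequential(nn.ReLU(), nn.Dropout(0.1), nn.Linear(300, 1))  # the multi-layer branch

encoding = torch.randn(8, 300)                         # (batch, first_linear_dim)
hidden = fusion_ffn(encoding)
output = ffn(hidden) if ffn is not None else hidden    # single-layer case leaves ffn as None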
Example #4
    def __init__(self,
                 name,
                 order,
                 hidden_size,
                 activation,
                 path,
                 input_layer=None,
                 pre_train=False,
                 tied_weights=True,
                 verbose="FULL",
                 num_pre_train_epochs=2000,
                 pre_train_batch_size=5000,
                 pre_train_cost="MSE",
                 pre_train_optimizer="Adam",
                 pre_train_learning_rate=0.001):
        """Initializes an autoencoder layer; the TensorFlow graph members (session, ops, weights) start as None."""

        self.path = path
        self.name = name
        self.order = order
        self.hidden_size = hidden_size
        self.activation_function = nn_utils.get_activation_function(activation)
        self.pre_train = pre_train
        self.tied_weights = tied_weights
        self.num_pre_train_epochs = num_pre_train_epochs
        self.pre_train_batch_size = pre_train_batch_size
        self.pre_train_cost = pre_train_cost
        self.pre_train_optimizer_function = nn_utils.get_optimizer_function(
            pre_train_optimizer)
        self.pre_train_learning_rate = pre_train_learning_rate
        self.verbose = verbose
        self.input_layer = input_layer
        self.layer_type = "Autoencoder"

        self.init = None
        self.tf_session = None
        self.pre_train_step = None
        self.encode = None
        self.pre_train_encode = None
        self.pre_train_decode = None
        self._input_data = None
        self._output_data = None
        self._input_labels = None
        self.Wi = None
        self.bi = None
        self.Wo = None
        self.bo = None
        self.cost = None
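tied_weights defaults to True; in a weight-tied autoencoder the decoder reuses the transpose of the encoder matrix (named Wi/Wo in the attributes above). A small sketch of that idea, with the shapes and the ReLU activation assumed rather than taken from this class:

import torch

Wi = torch.randn(64, 128)                   # encoder weights: input_dim x hidden_size
Wo = Wi.t()                                 # tied decoder weights: hidden_size x input_dim
x = torch.randn(5, 64)
reconstruction = torch.relu(x @ Wi) @ Wo    # encode, then decode with the tied weights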
Example #5
    def create_ffn(self, args: Namespace):
        """
        Creates the feed-forward network for the model.

        :param args: Arguments.
        """
        self.multiclass = args.dataset_type == 'multiclass'
        if self.multiclass:
            self.num_classes = args.multiclass_num_classes
        if args.features_only:
            first_linear_dim = args.features_size
        else:
            first_linear_dim = args.hidden_size
            if args.use_input_features:
                first_linear_dim += args.features_dim
        if args.pooling == 'lstm':
            first_linear_dim *= 2

        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)

        # Create FFN layers
        if args.ffn_num_layers == 1:
            ffn = [
                dropout,
                nn.Linear(first_linear_dim, args.output_size)
            ]
        else:
            ffn = [
                dropout,
                nn.Linear(first_linear_dim, args.ffn_hidden_size)
            ]
            for _ in range(args.ffn_num_layers - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, args.output_size),
            ])

        # Create FFN model
        self.ffn = nn.Sequential(*ffn)
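The doubling of first_linear_dim for 'lstm' pooling is consistent with a bidirectional LSTM, whose per-timestep output width is twice its hidden size; that reading is an inference, and the sizes below are assumed:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=133, hidden_size=300, batch_first=True, bidirectional=True)
out, _ = lstm(torch.randn(8, 20, 133))   # input: (batch, seq_len, input_size)
print(out.shape)                         # torch.Size([8, 20, 600]) == 2 * hidden_size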
Example #6
    def create_ffn(self, args: Namespace):
        """
        Creates the feed-forward network for the model.

        :param args: Arguments.
        """
        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)

        self.fusion_ffn_local = nn.Linear(args.hidden_size, args.ffn_hidden_size)
        self.fusion_ffn_global = nn.Linear(args.gcn_hidden3, args.ffn_hidden_size)
        ffn = []
        # Hidden layers after the fusion layers
        for _ in range(args.ffn_num_layers - 2):
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
            ])
        ffn.extend([
            activation,
            dropout,
            nn.Linear(args.ffn_hidden_size, args.drug_nums),
        ])
        # Create FFN model
        self.ffn = nn.Sequential(*ffn)
        self.dropout = dropout
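The two fusion layers project the local (message-passing) and global (GCN) encodings to a common width; how they are merged afterwards is not shown here, so the element-wise sum below is an assumption, as are the sizes:

import torch
import torch.nn as nn

hidden_size, gcn_hidden3, ffn_hidden_size = 300, 256, 300
fusion_local = nn.Linear(hidden_size, ffn_hidden_size)
fusion_global = nn.Linear(gcn_hidden3, ffn_hidden_size)

local_enc = torch.randn(8, hidden_size)     # e.g. molecule-level encoder output
global_enc = torch.randn(8, gcn_hidden3)    # e.g. graph-level GCN output
fused = fusion_local(local_enc) + fusion_global(global_enc)   # assumed fusion rule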
Example #7
    def __init__(self, args: Namespace, atom_fdim: int, bond_fdim: int):
        """Initializes the MPNEncoder.

        :param args: Arguments.
        :param atom_fdim: Atom features dimension.
        :param bond_fdim: Bond features dimension.
        """
        super(MPNEncoder, self).__init__()
        self.atom_fdim = atom_fdim
        self.bond_fdim = bond_fdim
        self.hidden_size = args.hidden_size
        self.bias = args.bias
        self.depth = args.depth
        self.dropout = args.dropout
        self.layers_per_message = 1
        self.undirected = args.undirected
        self.atom_messages = args.atom_messages
        self.features_only = args.features_only
        self.use_input_features = args.use_input_features
        self.args = args

        if self.features_only:
            return

        # Dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        # Activation
        self.act_func = get_activation_function(args.activation)

        # Cached zeros
        self.cached_zero_vector = nn.Parameter(torch.zeros(self.hidden_size),
                                               requires_grad=False)

        # Input
        input_dim = self.atom_fdim if self.atom_messages else self.bond_fdim
        self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias)

        if self.atom_messages:
            w_h_input_size = self.hidden_size + self.bond_fdim
        else:
            w_h_input_size = self.hidden_size

        # Shared weight matrix across depths (default)
        self.weight_tying = self.args.weight_tying
        n_message_layer = 1 if self.weight_tying else self.depth - 1
        self.W_h = nn.ModuleList([
            nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias)
            for _ in range(n_message_layer)
        ])

        self.W_o = nn.Linear(self.atom_fdim + self.hidden_size,
                             self.hidden_size)

        # TODO: parameters for attention
        self.attn_num_d = self.args.attn_num_d
        self.attn_num_r = self.args.attn_num_r
        self.W_s1 = Parameter(
            torch.FloatTensor(self.hidden_size, self.attn_num_d))
        self.W_s2 = Parameter(
            torch.FloatTensor(self.attn_num_d, self.attn_num_r))
        self.softmax = nn.Softmax(dim=1)

        self.i_layer = nn.Linear(self.hidden_size, self.hidden_size)
        self.j_layer = nn.Linear(self.hidden_size, self.hidden_size)
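W_s1 and W_s2 above wrap torch.FloatTensor, which allocates uninitialized memory, so an explicit init is normally applied elsewhere; a sketch of one common choice, Xavier initialization, with the attention sizes assumed:

import torch
import torch.nn as nn
from torch.nn import Parameter

hidden_size, attn_num_d, attn_num_r = 300, 30, 10
W_s1 = Parameter(torch.FloatTensor(hidden_size, attn_num_d))
W_s2 = Parameter(torch.FloatTensor(attn_num_d, attn_num_r))
nn.init.xavier_normal_(W_s1)   # fills the parameter in place
nn.init.xavier_normal_(W_s2)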