Example No. 1
    def __init__(self, args, atom_fdim, bond_fdim):
        """
        :param args: A :class:`~chemprop.args.TrainArgs` object containing model arguments.
        :param atom_fdim: Atom feature vector dimension.
        :param bond_fdim: Bond feature vector dimension.
        """
        super(MPNEncoder, self).__init__()
        self.atom_fdim = atom_fdim
        self.bond_fdim = bond_fdim
        self.atom_messages = args.atom_messages
        self.hidden_size = args.hidden_size
        self.bias = args.bias
        self.depth = args.depth
        self.dropout = args.dropout
        self.layers_per_message = 1
        self.undirected = args.undirected
        self.features_only = args.features_only
        self.use_input_features = args.use_input_features
        self.device = args.device
        self.aggregation = args.aggregation
        self.aggregation_norm = args.aggregation_norm

        # Dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        # Activation
        self.act_func = get_activation_function(args.activation)

        # Cached zeros
        self.cached_zero_vector = nn.Parameter(torch.zeros(self.hidden_size),
                                               requires_grad=False)

        # Input
        input_dim = self.atom_fdim if self.atom_messages else self.bond_fdim
        self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias)

        if self.atom_messages:
            w_h_input_size = self.hidden_size + self.bond_fdim
        else:
            w_h_input_size = self.hidden_size

        # Shared weight matrix across depths (default)
        self.W_h = nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias)

        self.W_o = nn.Linear(self.atom_fdim + self.hidden_size,
                             self.hidden_size)

        # Extra layer after concatenating atom descriptors when args.atom_descriptors == 'descriptor'
        if args.atom_descriptors == 'descriptor':
            self.atom_descriptors_size = args.atom_descriptors_size
            self.atom_descriptors_layer = nn.Linear(
                self.hidden_size + self.atom_descriptors_size,
                self.hidden_size + self.atom_descriptors_size,
            )
Example No. 2
    def __init__(self, args: Namespace, atom_fdim: int, bond_fdim: int):
        """
        :param args: Model arguments (accessed as a mapping, e.g. ``args['hidden_size']``).
        :param atom_fdim: Atom feature vector dimension.
        :param bond_fdim: Bond feature vector dimension.
        """
        super(MPNEncoder, self).__init__()
        self.atom_fdim = atom_fdim
        self.bond_fdim = bond_fdim
        self.hidden_size = args['hidden_size']
        self.bias = args['bias']
        self.depth = args['depth']
        self.dropout = args['dropout']
        self.layers_per_message = 1
        self.undirected = args['undirected']
        self.atom_messages = args['atom_messages']
        self.features_only = args['features_only']
        self.args = args

        # Dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        # Activation
        self.act_func = get_activation_function(args['activation'])

        # Input
        input_dim = self.atom_fdim
        self.W_i_atom = nn.Linear(input_dim, self.hidden_size, bias=self.bias)
        input_dim = self.bond_fdim
        self.W_i_bond = nn.Linear(input_dim, self.hidden_size, bias=self.bias)

        w_h_input_size_atom = self.hidden_size + self.bond_fdim
        self.W_h_atom = nn.Linear(w_h_input_size_atom,
                                  self.hidden_size,
                                  bias=self.bias)

        w_h_input_size_bond = self.hidden_size

        # Per-depth weight matrices, registered directly into self._modules
        for depth in range(self.depth - 1):
            self._modules[f'W_h_{depth}'] = nn.Linear(w_h_input_size_bond,
                                                      self.hidden_size,
                                                      bias=self.bias)

        self.W_o = nn.Linear(self.hidden_size * 2, self.hidden_size)

        self.gru = BatchGRU(self.hidden_size)

        self.lr = nn.Linear(self.hidden_size * 3,
                            self.hidden_size,
                            bias=self.bias)
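
A note on the loop that creates W_h_0 … W_h_{depth-2}: writing into self._modules works because that is where nn.Module keeps its registered submodules, but the more common, equivalent pattern is nn.ModuleList. A small standalone sketch (names are illustrative, not taken from the example):

import torch.nn as nn

class DepthWeights(nn.Module):
    """Per-depth bond-message weight matrices, registered via nn.ModuleList."""

    def __init__(self, hidden_size: int, depth: int, bias: bool = False):
        super().__init__()
        self.W_h = nn.ModuleList(
            [nn.Linear(hidden_size, hidden_size, bias=bias) for _ in range(depth - 1)]
        )

    def forward(self, message, depth_index: int):
        # Select the weight matrix for the current message-passing step.
        return self.W_h[depth_index](message)

m = DepthWeights(hidden_size=300, depth=6)
print(len(m.W_h))  # 5 linear layers, all visible to m.parameters() and m.state_dict()
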
Example No. 3
    def create_ffn(self):
        """
        Creates the feed-forward network for the model.
        :param args: Arguments.
        """
        args = self.model_configs

        self.multiclass = self.task_type == 'Multiclass-Classification'
        if self.multiclass:
            self.num_classes = self.multiclass_num_classes
        if args['features_only']:
            first_linear_dim = self.dim_features
        else:
            first_linear_dim = int(args['hidden_size'])
            first_linear_dim += self.dim_features

        dropout = nn.Dropout(args['dropout'])
        activation = get_activation_function(args['activation'])

        # Create FFN layers
        if args['ffn_num_layers'] == 1:
            ffn = [dropout, nn.Linear(first_linear_dim, self.dim_target)]
        else:
            ffn = [
                dropout,
                nn.Linear(first_linear_dim, args['ffn_hidden_size'])
            ]
            for _ in range(args['ffn_num_layers'] - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args['ffn_hidden_size'],
                              args['ffn_hidden_size']),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args['ffn_hidden_size'], self.dim_target),
            ])

        # Create FFN model
        self.ffn = nn.Sequential(*ffn)
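
To see what this builds, here is a self-contained sketch of the same FFN construction pattern with an assumed configuration dict; get_activation_function is replaced by nn.ReLU() so the snippet runs on its own, and the dimensions are illustrative.

import torch
import torch.nn as nn

# Assumed, illustrative configuration (not taken from the example above).
args = {'dropout': 0.1, 'ffn_num_layers': 3, 'ffn_hidden_size': 300}
first_linear_dim, dim_target = 300, 1

dropout = nn.Dropout(args['dropout'])
activation = nn.ReLU()  # stand-in for get_activation_function(args['activation'])

if args['ffn_num_layers'] == 1:
    ffn = [dropout, nn.Linear(first_linear_dim, dim_target)]
else:
    ffn = [dropout, nn.Linear(first_linear_dim, args['ffn_hidden_size'])]
    for _ in range(args['ffn_num_layers'] - 2):
        ffn.extend([activation, dropout,
                    nn.Linear(args['ffn_hidden_size'], args['ffn_hidden_size'])])
    ffn.extend([activation, dropout,
                nn.Linear(args['ffn_hidden_size'], dim_target)])

ffn = nn.Sequential(*ffn)
print(ffn(torch.randn(4, first_linear_dim)).shape)  # torch.Size([4, 1])
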
Example No. 4
    def create_ffn(self):
        """
        Creates the feed-forward layers for the model.
        :param args: A :class:`~chemprop.args.TrainArgs` object containing model arguments.
        """
        args = self.model_configs

        self.multiclass = self.task_type == 'Multi-Class'
        if self.multiclass:
            self.num_classes = self.multiclass_num_classes

        if args.features_only:
            first_linear_dim = self.dim_features
        else:
            first_linear_dim = int(args.hidden_size)
            first_linear_dim += self.dim_features

        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)

        # Create FFN layers
        if args.ffn_num_layers == 1:
            ffn = [dropout, nn.Linear(first_linear_dim, self.dim_target)]
        else:
            ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]
            for _ in range(args.ffn_num_layers - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, self.dim_target),
            ])

        # Create FFN model
        self.ffn = nn.Sequential(*ffn)
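
When the multiclass flag is set, the final linear layer is typically sized to num_tasks * num_classes and the flat FFN output is reshaped before a per-task softmax. The sketch below shows that reshape with made-up sizes; the variable names are illustrative and not taken from the examples above.

import torch

batch_size, num_tasks, num_classes = 8, 2, 3

flat_output = torch.randn(batch_size, num_tasks * num_classes)      # FFN output
per_task = flat_output.reshape(batch_size, num_tasks, num_classes)  # one block per task
probs = torch.softmax(per_task, dim=2)                              # class probabilities
print(probs.shape)  # torch.Size([8, 2, 3])
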