Example #1
 def __init__(self, params, use_cuda):
     super(CNNEncoder, self).__init__(params, use_cuda)
     check_params(params, self.get_required_params(),
                  self.get_optional_params())
     self.dropout = params['dropout']
     self.input_size = params['input_size']
     self.encoder_dim = params['encoder_dim']
     self.pooling = params['pooling']
     if self.pooling not in ['max', 'mean', 'sum']:
         raise ValueError("Pooling must be one of 'max', 'mean', 'sum'")
     kernel_sizes = params['kernel_sizes']
     assert len(kernel_sizes) > 0
     self.convolutions = nn.ModuleList()
     self.dropouts = nn.ModuleList()
     out_channels = self.encoder_dim
     for i in range(len(kernel_sizes)):
         if i == 0:
             in_channels = self.input_size
         else:
             in_channels = self.encoder_dim
         self.dropouts.append(nn.Dropout(self.dropout))
         kernel_size = kernel_sizes[i]
         self.convolutions.append(
             ConvBNReLU(in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=kernel_size,
                        stride=1))
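For reference, a minimal sketch of a params dict this constructor could consume, based only on the keys read above (values are illustrative, not library defaults; check_params may require further keys listed by get_required_params):

cnn_params = {
    'input_size': 128,          # feature dimension fed to the first convolution
    'encoder_dim': 256,         # out_channels shared by every ConvBNReLU block
    'dropout': 0.3,
    'pooling': 'max',           # must be 'max', 'mean' or 'sum'
    'kernel_sizes': [3, 5, 7],  # one ConvBNReLU block per kernel size
}
# assumes torch and CNNEncoder are already imported
encoder = CNNEncoder(cnn_params, use_cuda=torch.cuda.is_available())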
Example #2
    def __init__(self, params):
        super(OpenChemModel, self).__init__()
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        if 'lr_scheduler' not in params.keys():
            params['lr_scheduler'] = None
            params['lr_scheduler_params'] = None
        self.params = params
        self.use_cuda = self.params['use_cuda']
        self.batch_size = self.params['batch_size']
        self.eval_metrics = self.params['eval_metrics']
        self.task = self.params['task']
        self.logdir = self.params['logdir']

        self.num_epochs = self.params['num_epochs']
        if 'use_clip_grad' in self.params.keys():
            self.use_clip_grad = self.params['use_clip_grad']
        else:
            self.use_clip_grad = False
        if self.use_clip_grad:
            self.max_grad_norm = self.params['max_grad_norm']
        else:
            self.max_grad_norm = None
        self.random_seed = self.params['random_seed']
        self.print_every = self.params['print_every']
        self.save_every = self.params['save_every']
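The base-class keys above translate into a configuration roughly like the following (illustrative sketch; OpenChemModel is normally subclassed, and my_metric_fn is a hypothetical callable standing in for whatever metric the task uses):

base_params = {
    'use_cuda': True,
    'batch_size': 64,
    'eval_metrics': my_metric_fn,  # hypothetical callable, e.g. an accuracy or R^2 function
    'task': 'regression',
    'logdir': './logs',
    'num_epochs': 50,
    'random_seed': 42,
    'print_every': 10,
    'save_every': 5,
    'use_clip_grad': True,   # optional; treated as False when missing
    'max_grad_norm': 10.0,   # only read when use_clip_grad is True
    # 'lr_scheduler' / 'lr_scheduler_params' fall back to None when omitted
}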
Example #3
 def __init__(self, params):
     super(OpenChemMLP, self).__init__()
     check_params(params, self.get_required_params(),
                  self.get_optional_params())
     self.params = params
     self.hidden_size = self.params['hidden_size']
     self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
     self.n_layers = self.params['n_layers']
     self.activation = self.params['activation']
     if type(self.activation) is list:
         assert len(self.activation) == self.n_layers
     else:
         self.activation = [self.activation] * self.n_layers
     if 'dropout' in self.params.keys():
         self.dropout = self.params['dropout']
     else:
         self.dropout = 0
     self.layers = nn.ModuleList([])
     self.bn = nn.ModuleList([])
     self.dropouts = nn.ModuleList([])
     for i in range(self.n_layers):
         self.dropouts.append(nn.Dropout(self.dropout))
         self.bn.append(nn.BatchNorm1d(self.hidden_size[i]))
         self.layers.append(
             nn.Linear(in_features=self.input_size[i],
                       out_features=self.hidden_size[i]))
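A hypothetical configuration for this MLP, inferred from the keys above: hidden_size needs one entry per layer, input sizes are chained automatically, and a single activation is replicated across all layers.

import torch.nn.functional as F

mlp_params = {
    'input_size': 256,
    'n_layers': 2,
    'hidden_size': [128, 1],   # one width per layer; the last entry is the output size
    'activation': F.relu,      # a single callable is copied to every layer
    'dropout': 0.5,            # optional; defaults to 0
}
mlp = OpenChemMLP(mlp_params)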
Example #4
    def __init__(self, params):
        super(OpenChemMLPSimple, self).__init__()
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        self.params = params
        self.hidden_size = self.params['hidden_size']
        self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
        self.n_layers = self.params['n_layers']
        self.activation = self.params['activation']
        if type(self.activation) is list:
            assert len(self.activation) == self.n_layers
        else:
            self.activation = [self.activation] * self.n_layers

        self.layers = nn.ModuleList([])
        for i in range(self.n_layers):
            self.layers.append(
                nn.Linear(in_features=self.input_size[i],
                          out_features=self.hidden_size[i]))

        if "init" in self.params.keys():
            if self.params["init"] == "xavier_uniform":
                for m in self.modules():
                    if isinstance(m, nn.Linear):
                        nn.init.xavier_uniform_(
                            m.weight, gain=nn.init.calculate_gain('relu'))
            else:
                raise NotImplementedError("Only xavier_uniform "
                                          "initialization is "
                                          "supported now in OpenChemMLPSimple")
Example #5
 def __init__(self, params, use_cuda=None):
     super(OpenChemEncoder, self).__init__()
     check_params(params, self.get_required_params(),
                  self.get_optional_params())
     self.params = params
     if use_cuda is None:
         use_cuda = torch.cuda.is_available()
     self.use_cuda = use_cuda
     self.input_size = self.params['input_size']
     self.encoder_dim = self.params['encoder_dim']
Example #6
 def __init__(self, params):
     super(OpenChemEmbedding, self).__init__()
     check_params(params, self.get_required_params(),
                  self.get_optional_params())
     self.params = params
     self.num_embeddings = self.params['num_embeddings']
     if 'padding_idx' in params.keys():
         self.padding_idx = self.params['padding_idx']
     else:
         self.padding_idx = None
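A minimal params sketch using only the keys read in this snippet (a concrete subclass will typically read additional keys, such as an embedding dimension, that are not shown here):

embedding_params = {
    'num_embeddings': 100,   # e.g. vocabulary size
    'padding_idx': 0,        # optional; defaults to None
}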
Example #7
    def __init__(self, params, use_cuda):
        super(RNNEncoder, self).__init__(params, use_cuda)
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        self.layer = self.params['layer']
        layers = ['LSTM', 'GRU', 'RNN']
        if self.layer not in layers:
            raise ValueError(self.layer + " is an invalid value for argument"
                             " 'layer'. Choose one from: " +
                             ', '.join(layers))

        self.input_size = self.params['input_size']
        self.encoder_dim = self.params['encoder_dim']
        self.n_layers = self.params['n_layers']
        if self.n_layers > 1:
            self.dropout = self.params['dropout']
        else:
            # stacked-RNN dropout only applies between layers, so force it to 0
            # for a single-layer encoder (requires `import warnings` at module level)
            warnings.warn('dropout can be non-zero only when n_layers > 1. '
                          'Parameter dropout set to 0.')
            self.dropout = 0
        self.bidirectional = self.params['is_bidirectional']
        if self.bidirectional:
            self.n_directions = 2
        else:
            self.n_directions = 1
        if self.layer == 'LSTM':
            self.rnn = nn.LSTM(self.input_size,
                               self.encoder_dim,
                               self.n_layers,
                               bidirectional=self.bidirectional,
                               dropout=self.dropout,
                               batch_first=True)
        elif self.layer == 'GRU':
            self.rnn = nn.GRU(self.input_size,
                              self.encoder_dim,
                              self.n_layers,
                              bidirectional=self.bidirectional,
                              dropout=self.dropout,
                              batch_first=True)
        else:
            self.rnn = nn.RNN(self.input_size,
                              self.encoder_dim,
                              self.n_layers,
                              bidirectional=self.bidirectional,
                              dropout=self.dropout,
                              batch_first=True)
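Putting the keys together, a hypothetical RNN encoder configuration might look like this (values illustrative):

rnn_params = {
    'layer': 'LSTM',            # 'LSTM', 'GRU' or 'RNN'
    'input_size': 128,
    'encoder_dim': 256,
    'n_layers': 2,
    'dropout': 0.3,             # only honoured when n_layers > 1
    'is_bidirectional': True,
}
encoder = RNNEncoder(rnn_params, use_cuda=torch.cuda.is_available())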
Example #8
 def __init__(self, params, use_cuda):
     super(GraphCNNEncoder, self).__init__(params, use_cuda)
     check_params(params, self.get_required_params(),
                  self.get_optional_params())
     self.n_layers = params['n_layers']
     self.hidden_size = params['hidden_size']
     if 'dropout' in params.keys():
         self.dropout = params['dropout']
     else:
         self.dropout = 0
     assert len(self.hidden_size) == self.n_layers
     self.hidden_size = [self.input_size] + self.hidden_size
     self.graph_convolutions = nn.ModuleList()
     self.dropout_layer = nn.Dropout(p=self.dropout)
     self.dense = nn.Linear(in_features=self.hidden_size[-1],
                            out_features=self.encoder_dim)
     for i in range(1, self.n_layers + 1):
         self.graph_convolutions.append(
             GraphConvolution(self.hidden_size[i - 1], self.hidden_size[i]))
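Finally, a sketch of graph-encoder params consistent with the keys above ('input_size' and 'encoder_dim' are read by the parent encoder shown in Example #5; values are illustrative):

graph_params = {
    'input_size': 75,             # per-node feature size
    'encoder_dim': 128,           # output size of the final dense layer
    'n_layers': 3,
    'hidden_size': [64, 64, 64],  # must contain exactly n_layers entries
    'dropout': 0.2,               # optional; defaults to 0
}
encoder = GraphCNNEncoder(graph_params, use_cuda=torch.cuda.is_available())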