Example #1
    def __init__(self, num_class=2, config=None):
        super(RCNN, self).__init__()

        main_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(main_dir, "config.yml")
        default_config = read_yaml(config_path)

        config = (merge_dictb_to_dicta(default_config, config)
                  if config is not None else default_config)

        # Embedding Layer
        self.embeddings = nn.Embedding(config.vocab_size, config.embedding_dim)

        # Bi-directional LSTM for RCNN
        self.lstm = nn.LSTM(
            input_size=config.embedding_dim,
            hidden_size=config.hidden_dim,
            num_layers=config.n_layers,
            dropout=config.dropout,
            bidirectional=True,
        )

        self.dropout = nn.Dropout(config.dropout)

        # Linear layer producing the "convolutional" features that are fed to the pooling layer
        self.W = nn.Linear(
            config.embedding_dim + 2 * config.hidden_dim,
            config.hidden_dim_linear,
        )

        # Tanh non-linearity
        self.tanh = nn.Tanh()

        # Fully-Connected Layer
        self.fc = nn.Linear(config.hidden_dim_linear, num_class)
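
For orientation, the RCNN concatenates each token's embedding with the BiLSTM states around it, projects the result through the linear + tanh layer, and max-pools over the time dimension before the final classifier. Below is a minimal, self-contained sketch of that data flow; the sizes, tensor names, and batch layout are placeholders, not the original forward method.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Placeholder hyperparameters standing in for the YAML config values
vocab_size, embedding_dim, hidden_dim, hidden_dim_linear, num_class = 1000, 50, 64, 32, 2
embeddings = nn.Embedding(vocab_size, embedding_dim)
lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True)
W = nn.Linear(embedding_dim + 2 * hidden_dim, hidden_dim_linear)
fc = nn.Linear(hidden_dim_linear, num_class)

x = torch.randint(0, vocab_size, (35, 8))             # (seq_len, batch) token ids
emb = embeddings(x)                                    # (seq_len, batch, embedding_dim)
lstm_out, _ = lstm(emb)                                # (seq_len, batch, 2 * hidden_dim)
combined = torch.cat([lstm_out, emb], dim=2)           # each token plus its left/right context
y = torch.tanh(W(combined)).permute(1, 2, 0)           # (batch, hidden_dim_linear, seq_len)
pooled = F.max_pool1d(y, y.shape[2]).squeeze(2)        # max over the time dimension
logits = fc(pooled)                                    # (batch, num_class)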
Example #2
    def __init__(self, output_dim, hparams):
        super().__init__()

        main_dir = Path(os.path.dirname(os.path.realpath(__file__))).parent
        config_path = os.path.join(main_dir, "config/bilstm.yml")
        default_hparams = read_yaml(config_path)

        hparams = merge_dictb_to_dicta(default_hparams, hparams)

        # Embedding Layer (the padding embedding is not updated during training)
        self.embedding = nn.Embedding(
            hparams.vocab_size, hparams.embedding_dim, padding_idx=hparams.pad_idx
        )

        # (Bi-)LSTM encoder
        self.rnn = nn.LSTM(
            hparams.embedding_dim,
            hparams.hidden_dim,
            num_layers=hparams.n_layers,
            bidirectional=hparams.bidirectional,
            dropout=hparams.dropout,
        )

        # Fully-Connected Layer over the concatenated forward/backward hidden states
        self.fc = nn.Linear(hparams.hidden_dim * 2, output_dim)

        self.dropout = nn.Dropout(hparams.dropout)

        # Cross-entropy loss expects raw logits and integer class labels
        self.criterion = nn.CrossEntropyLoss()
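
A rough idea of how such a bidirectional LSTM classifier is typically driven: embed the padded token ids, run the LSTM, concatenate the final forward and backward hidden states, and pass them to the linear layer. The shapes and names below are illustrative placeholders, not the project's actual forward pass.

import torch
import torch.nn as nn

# Placeholder hyperparameters standing in for the YAML config values
vocab_size, embedding_dim, hidden_dim, output_dim, pad_idx = 1000, 50, 64, 2, 1
embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=0.3)
fc = nn.Linear(hidden_dim * 2, output_dim)

text = torch.randint(0, vocab_size, (40, 16))          # (seq_len, batch) token ids
_, (hidden, _) = rnn(embedding(text))                  # hidden: (n_layers * 2, batch, hidden_dim)
hidden = torch.cat([hidden[-2], hidden[-1]], dim=1)    # last layer's forward + backward states
logits = fc(hidden)                                    # (batch, output_dim)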
Example #3
    def __init__(self, output_dim, config=None):

        super().__init__()

        main_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(main_dir, "config.yml")
        default_config = read_yaml(config_path)

        config = (merge_dictb_to_dicta(default_config, config)
                  if config is not None else default_config)

        # Embedding Layer
        self.embedding = nn.Embedding(config.vocab_size,
                                      config.embedding_dim,
                                      padding_idx=config.pad_idx)

        # One 2-D convolution per filter size; each kernel spans the full embedding width
        self.convs = nn.ModuleList([
            nn.Conv2d(
                in_channels=1,
                out_channels=config.n_filters,
                kernel_size=(fs, config.embedding_dim),
            ) for fs in config.filter_sizes
        ])

        # Fully-Connected Layer over the concatenated pooled feature maps
        self.fc = nn.Linear(
            len(config.filter_sizes) * config.n_filters, output_dim)

        self.dropout = nn.Dropout(config.dropout)
        self.criterion = nn.CrossEntropyLoss()
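
The Conv2d-over-embeddings layout above is the usual TextCNN pattern: each convolution slides a window of fs tokens across the full embedding width, the feature maps are max-pooled over time, and the pooled vectors are concatenated for the classifier. A minimal standalone sketch with placeholder sizes (not the project's forward method):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Placeholder hyperparameters standing in for the YAML config values
vocab_size, embedding_dim, n_filters, filter_sizes, output_dim = 1000, 50, 100, [3, 4, 5], 2
embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=1)
convs = nn.ModuleList([nn.Conv2d(1, n_filters, (fs, embedding_dim)) for fs in filter_sizes])
fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)

text = torch.randint(0, vocab_size, (16, 40))                       # (batch, seq_len)
embedded = embedding(text).unsqueeze(1)                             # (batch, 1, seq_len, embedding_dim)
conved = [F.relu(conv(embedded)).squeeze(3) for conv in convs]      # (batch, n_filters, seq_len - fs + 1)
pooled = [F.max_pool1d(c, c.shape[2]).squeeze(2) for c in conved]   # (batch, n_filters) per filter size
logits = fc(torch.cat(pooled, dim=1))                               # (batch, output_dim)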
Example #4
    def __init__(self, num_class=2, config=None):
        super(Seq2SeqAttention, self).__init__()

        main_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(main_dir, "config.yml")
        default_config = read_yaml(config_path)

        config = (merge_dictb_to_dicta(default_config, config)
                  if config is not None else default_config)

        # Embedding Layer
        self.embeddings = nn.Embedding(config.vocab_size, config.embedding_dim)

        # Encoder RNN
        self.lstm = nn.LSTM(
            input_size=config.embedding_dim,
            hidden_size=config.hidden_dim,
            num_layers=config.n_layers,
            bidirectional=config.bidirectional,
        )

        # Dropout Layer
        self.dropout = nn.Dropout(config.dropout)

        # Fully-Connected Layer
        self.fc = nn.Linear(config.hidden_dim * (1 + config.bidirectional) * 2,
                            num_class)

        # Softmax non-linearity (applied over the last dimension)
        self.softmax = nn.Softmax(dim=-1)

        self.config = config
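
The fc layer takes hidden_dim * (1 + bidirectional) * 2 inputs because the final encoder state is concatenated with an attention-weighted context vector of the same size. The sketch below shows one common way to build that context vector with dot-product attention over the encoder outputs; the attention variant, names, and sizes are assumptions for illustration and may differ from the project's actual forward pass.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Placeholder hyperparameters standing in for the YAML config values
embedding_dim, hidden_dim, num_class, bidirectional = 50, 64, 2, True
lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=bidirectional)
fc = nn.Linear(hidden_dim * (1 + bidirectional) * 2, num_class)

emb = torch.randn(35, 8, embedding_dim)                 # (seq_len, batch, embedding_dim)
outputs, (hidden, _) = lstm(emb)                        # outputs: (seq_len, batch, 2 * hidden_dim)
final = torch.cat([hidden[-2], hidden[-1]], dim=1)      # (batch, 2 * hidden_dim)

scores = torch.einsum("sbh,bh->bs", outputs, final)     # dot-product score per time step
weights = F.softmax(scores, dim=1)                      # attention weights over the sequence
context = torch.einsum("bs,sbh->bh", weights, outputs)  # attention-weighted context vector

logits = fc(torch.cat([final, context], dim=1))         # (batch, num_class)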
Example #5
    def __init__(self, output_dim, config=None):
        super().__init__()

        main_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(main_dir, "config.yml")
        default_config = read_yaml(config_path)

        config = (
            merge_dictb_to_dicta(default_config, config)
            if config is not None
            else default_config
        )

        # Embedding Layer
        self.embedding = nn.Embedding(
            config.vocab_size, config.embedding_dim, padding_idx=config.pad_idx
        )

        # (Bi-)LSTM encoder
        self.rnn = nn.LSTM(
            config.embedding_dim,
            config.hidden_dim,
            num_layers=config.n_layers,
            bidirectional=config.bidirectional,
            dropout=config.dropout,
        )

        # Fully-Connected Layer (sized for a bidirectional encoder: 2 * hidden_dim inputs)
        self.fc = nn.Linear(config.hidden_dim * 2, output_dim)

        self.dropout = nn.Dropout(config.dropout)

        self.criterion = nn.CrossEntropyLoss()
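
Because the loss is stored on the module as self.criterion, a training step typically just compares the raw logits with integer class labels; nn.CrossEntropyLoss applies log-softmax internally, so no softmax goes in front of it. A generic illustration (the stand-in final layer, optimizer choice, shapes, and names are placeholders, not the project's training loop):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
fc = nn.Linear(128, 2)                         # stand-in for the model's final layer
optimizer = torch.optim.Adam(fc.parameters())

features = torch.randn(16, 128)                # e.g. concatenated forward/backward LSTM states
labels = torch.randint(0, 2, (16,))            # integer class ids, not one-hot vectors

logits = fc(features)                          # raw scores go straight into the loss
loss = criterion(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()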
Example #6
    def __init__(self, num_class=2, config=None):
        super(FastText, self).__init__()

        main_dir = os.path.dirname(os.path.realpath(__file__))
        config_path = os.path.join(main_dir, "config.yml")
        default_config = read_yaml(config_path)

        config = (
            merge_dictb_to_dicta(default_config, config)
            if config is not None
            else default_config
        )

        # Embedding Layer
        self.embeddings = nn.Embedding(config.vocab_size, config.embedding_dim)

        # Hidden Layer
        self.fc1 = nn.Linear(config.embedding_dim, config.hidden_dim)

        # Output Layer
        self.fc2 = nn.Linear(config.hidden_dim, num_class)
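
The FastText-style classifier averages the token embeddings over the sequence and pushes that bag-of-words vector through the two linear layers. A minimal sketch with placeholder sizes; whether a non-linearity sits between fc1 and fc2 depends on the original forward, which is not shown here, so the ReLU below is an assumption.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Placeholder hyperparameters standing in for the YAML config values
vocab_size, embedding_dim, hidden_dim, num_class = 1000, 50, 32, 2
embeddings = nn.Embedding(vocab_size, embedding_dim)
fc1 = nn.Linear(embedding_dim, hidden_dim)
fc2 = nn.Linear(hidden_dim, num_class)

x = torch.randint(0, vocab_size, (16, 40))     # (batch, seq_len) token ids
emb = embeddings(x)                            # (batch, seq_len, embedding_dim)
avg = emb.mean(dim=1)                          # average the embeddings over the sequence
logits = fc2(F.relu(fc1(avg)))                 # (batch, num_class)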