    # (Assumes module-level imports elsewhere in the file: torch.nn as nn,
    # a CRF layer such as torchcrf's, and the local word_emb, char_RNN,
    # word_RNN and word_CNN helpers.)

    # RNN_CNN tagger: word/char embeddings -> CNN encoder -> per-tag
    # scores, optionally decoded with a CRF.
    def __init__(self, conf, vocab, char_vocab, tag_vocab):
        super(RNN_CNN, self).__init__()
        # Word embedding and initial dropout
        self.w_input = word_emb(conf, vocab)

        # Character embeddings and character-level BiLSTM
        self.use_chars = conf["use_chars"]
        if self.use_chars:
            self.char_RNN = char_RNN(conf, char_vocab)
            # word embedding concatenated with char-level features
            in_shape = self.char_RNN.output_size + conf["w_dim"]
        else:
            in_shape = conf["w_dim"]

        # CNN
        self.w_cnn_layers = word_CNN(conf["w_cnn_layers"],
                                     in_shape,
                                     1,
                                     keep_dims=True)
        # total number of filters across all CNN layers
        output_size = sum(l["filters"] for l in conf["w_cnn_layers"])

        # Output Layer
        self.mid_dropout = nn.Dropout(conf["mid_dropout"])
        self.n_tags = len(tag_vocab)
        self.output = nn.Linear(output_size, self.n_tags)

        # CRF Layer
        self.use_crf = conf["use_crf"]
        if self.use_crf:
            self.crf = CRF(self.n_tags, batch_first=True)

        # Maybe move to GPU (assumes self.device is set on the module,
        # e.g. by a shared base class)
        self.to(self.device)
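
    # Illustrative only: a minimal conf covering the keys read by the
    # constructors in this file. Key names come from the code above; the
    # values are placeholder assumptions, not tuned settings.
    #
    # EXAMPLE_CONF = {
    #     "w_dim": 100,          # word embedding dimension
    #     "use_chars": True,     # concatenate character-level features
    #     "mid_dropout": 0.5,    # dropout before the output layer
    #     "use_crf": True,       # tagging models: decode with a CRF
    #     "w_rnn_out": 200,      # RNN models: BiLSTM hidden size
    #     "w_rnn_layers": 2,     # RNN models: number of BiLSTM layers
    #     "w_cnn_layers": [      # RNN_CNN models: one dict per CNN layer,
    #         {"filters": 128},  # each with a "filters" count
    #     ],
    # }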
    # RNN scorer: word/char embeddings -> BiLSTM -> one scalar score per
    # position (no tag vocabulary, no CRF).
    def __init__(self, conf, vocab, char_vocab):
        super(RNN, self).__init__()
        # Word embedding and initial dropout
        self.w_input = word_emb(conf, vocab)

        # Character embeddings and character-level BiLSTM
        self.use_chars = conf["use_chars"]
        if self.use_chars:
            self.char_RNN = char_RNN(conf, char_vocab)
            # word embedding concatenated with char-level features
            in_shape = self.char_RNN.output_size + conf["w_dim"]
        else:
            in_shape = conf["w_dim"]

        # Main BiLSTM
        self.word_RNN = word_RNN(in_shape, conf["w_rnn_out"],
                                 conf["w_rnn_layers"])
        output_size = self.word_RNN.output_size

        # Output Layer
        self.mid_dropout = nn.Dropout(conf["mid_dropout"])
        self.output = nn.Linear(output_size, 1)

        # Optionally re-initialize all parameters
        # self.apply(init_all)

        # Maybe move to GPU
        self.to(self.device)
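
    # Sketch (assumption, not part of the original class): a forward pass
    # matching the layers built above; the call signatures of w_input,
    # char_RNN and word_RNN are assumed.
    #
    # def forward(self, words, chars=None):
    #     x = self.w_input(words)                # (batch, seq, w_dim)
    #     if self.use_chars:
    #         # append char-level features to each word vector
    #         x = torch.cat([x, self.char_RNN(chars)], dim=-1)
    #     x = self.word_RNN(x)                   # (batch, seq, output_size)
    #     # one scalar score per position
    #     return self.output(self.mid_dropout(x)).squeeze(-1)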
    # RNN tagger: word/char embeddings -> BiLSTM -> per-tag scores,
    # optionally decoded with a CRF.
    def __init__(self, conf, vocab, char_vocab, tag_vocab):
        super(RNN, self).__init__()
        # Word embedding and initial dropout
        self.w_input = word_emb(conf, vocab)

        # Character embeddings and character-level BiLSTM
        self.use_chars = conf["use_chars"]
        if self.use_chars:
            self.char_RNN = char_RNN(conf, char_vocab)
            # word embedding concatenated with char-level features
            in_shape = self.char_RNN.output_size + conf["w_dim"]
        else:
            in_shape = conf["w_dim"]

        # Main BiLSTM
        self.word_RNN = word_RNN(in_shape, conf["w_rnn_out"],
                                 conf["w_rnn_layers"])
        output_size = self.word_RNN.output_size

        # Output Layer
        self.mid_dropout = nn.Dropout(conf["mid_dropout"])
        self.n_tags = len(tag_vocab)
        self.output = nn.Linear(output_size, self.n_tags)

        # CRF Layer
        self.use_crf = conf["use_crf"]
        if self.use_crf:
            self.crf = CRF(self.n_tags, batch_first=True)

        # Optionally re-initialize all parameters
        # self.apply(init_all)

        # Maybe move to GPU
        self.to(self.device)
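
    # Sketch (assumption): with pytorch-crf, whose CRF(num_tags,
    # batch_first=True) signature matches the layer above, training and
    # decoding would typically look like this. `emissions` is the
    # (batch, seq, n_tags) output of self.output and `mask` marks valid
    # (non-padding) positions; the method names are hypothetical.
    #
    # def neg_log_likelihood(self, emissions, tags, mask):
    #     # CRF.forward returns the log-likelihood; negate it for a loss
    #     return -self.crf(emissions, tags, mask=mask, reduction="mean")
    #
    # def predict(self, emissions, mask):
    #     # Viterbi decoding; returns a list of tag-index lists
    #     return self.crf.decode(emissions, mask=mask)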
    # RNN_CNN scorer: word/char embeddings -> CNN encoder -> one scalar
    # score per position.
    def __init__(self, conf, vocab, char_vocab):
        super(RNN_CNN, self).__init__()
        # Word embedding and initial dropout
        self.w_input = word_emb(conf, vocab)

        # Character embeddings and character-level BiLSTM
        self.use_chars = conf["use_chars"]
        if self.use_chars:
            self.char_RNN = char_RNN(conf, char_vocab)
            # word embedding concatenated with char-level features
            in_shape = self.char_RNN.output_size + conf["w_dim"]
        else:
            in_shape = conf["w_dim"]

        # CNN
        self.w_cnn_layers = word_CNN(conf["w_cnn_layers"], in_shape, 1)
        output_size = self.w_cnn_layers.output_size

        # Output Layer
        # self.mid_norm = nn.BatchNorm1d(output_size)
        self.mid_dropout = nn.Dropout(conf["mid_dropout"])
        self.output = nn.Linear(output_size, 1)

        # Maybe move to GPU
        self.to(self.device)
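
    # Sketch (assumption): the forward pass for this CNN variant mirrors
    # the RNN scorer sketch above, with the CNN stack as the encoder.
    #
    # def forward(self, words, chars=None):
    #     x = self.w_input(words)
    #     if self.use_chars:
    #         x = torch.cat([x, self.char_RNN(chars)], dim=-1)
    #     x = self.w_cnn_layers(x)               # (batch, seq, output_size)
    #     return self.output(self.mid_dropout(x)).squeeze(-1)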