Code example #1
    def __init__(self,
                 h_max_length=20,
                 b_max_length=200,
                 trainable=False,
                 lstm_layers=2,
                 mlp_layers=1,
                 num_neurons=[128, 128, 32],
                 share_parameters=True,
                 average_pooling=False,
                 optimizer=tf.train.AdamOptimizer,
                 learning_rate=0.001,
                 batch_size=128,
                 activation=tf.nn.relu,
                 initializer=he_init,
                 num_epoch=20,
                 batch_norm_momentum=None,
                 dropout_rate=None,
                 max_check_without_progress=20,
                 show_progress=10,
                 tensorboard_logdir=None,
                 random_state=None,
                 embedding=None,
                 l2_lambda=0.01):
        # Assumes TensorFlow 1.x (tf.train.AdamOptimizer) and that he_init
        # and the LSTM base class are defined elsewhere in the module.
        # Delegate all shared hyperparameters to the LSTM base class; the
        # three variants in this listing differ mainly in their defaults.
        LSTM.__init__(self, h_max_length, b_max_length, trainable, lstm_layers,
                      mlp_layers, num_neurons, share_parameters,
                      average_pooling, optimizer, learning_rate, batch_size,
                      activation, initializer, num_epoch, batch_norm_momentum,
                      dropout_rate, max_check_without_progress, show_progress,
                      tensorboard_logdir, random_state, embedding, l2_lambda)
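
Each of these constructors only sets default hyperparameters and forwards everything to the shared LSTM base class. A minimal usage sketch under stated assumptions: MyLSTM is a hypothetical name standing in for the subclass that owns the __init__ above (the snippets omit the class declarations), and he_init is shown with one common TensorFlow 1.x He-style definition:

import tensorflow as tf

# Assumption: he_init is defined somewhere in the module; this is one
# common TF 1.x He-style initializer.
he_init = tf.variance_scaling_initializer(scale=2.0, mode="fan_in")

# MyLSTM is hypothetical; the snippets do not show the subclass names.
model = MyLSTM(h_max_length=20,             # max h-sequence length
               b_max_length=200,            # max b-sequence length
               num_neurons=[128, 128, 32],  # two LSTM sizes + one MLP size
               dropout_rate=0.5,            # default None disables dropout
               l2_lambda=0.01)              # L2 regularization strength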
Code example #2
    def __init__(self,
                 h_max_length=20,
                 b_max_length=200,
                 trainable=False,
                 lstm_layers=2,
                 mlp_layers=1,
                 num_neurons=[128, 128, 32],
                 share_parameters=True,
                 average_pooling=False,
                 optimizer=tf.train.AdamOptimizer,
                 learning_rate=0.001,
                 batch_size=128,
                 activation=tf.nn.relu,
                 initializer=he_init,
                 num_epoch=100,
                 batch_norm_momentum=None,
                 dropout_rate=None,
                 max_check_without_progress=10,
                 show_progress=10,
                 tensorboard_logdir=None,
                 random_state=None,
                 embedding=None,
                 l2_lambda=0.01,
                 vocab_size=None,
                 n_outputs=3,
                 pos_weight=None):
        LSTM.__init__(self, h_max_length, b_max_length, trainable, lstm_layers,
                      mlp_layers, num_neurons, share_parameters,
                      average_pooling, optimizer, learning_rate, batch_size,
                      activation, initializer, num_epoch, batch_norm_momentum,
                      dropout_rate, max_check_without_progress, show_progress,
                      tensorboard_logdir, random_state, embedding, l2_lambda,
                      vocab_size)

        # Overrides the mlp_layers argument: with two LSTM layers, the
        # remaining entries of num_neurons are taken as MLP layer sizes.
        self.mlp_layers = len(num_neurons) - 2
        self.vocab_size = vocab_size
        # dim_fasttext is assumed to be a module-level constant: the
        # dimensionality of the FastText vectors concatenated to the
        # 300-dimensional word embeddings.
        self.embedding_size = 300 + dim_fasttext
        self.n_outputs = n_outputs
        self.pos_weight = pos_weight
        self._graph = None
        self._classes = None
        self._session = None
        self.logger = LogHelper.get_logger(self.__class__.__name__)
        if self.embedding is None and self.vocab_size is None:
            raise ValueError("Either embedding or vocab_size must be set!")
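
This second variant adds classifier-specific parameters: vocab_size (required when no pre-trained embedding matrix is supplied), n_outputs (three classes), and pos_weight for class weighting. The snippets do not show how pos_weight is consumed; a minimal sketch, assuming it feeds TensorFlow 1.x's weighted cross-entropy (the placeholders and weight values below are illustrative, not from the original):

import tensorflow as tf

# Illustrative TF 1.x graph fragment showing the usual role of pos_weight.
logits = tf.placeholder(tf.float32, [None, 3])   # model outputs, n_outputs=3
targets = tf.placeholder(tf.float32, [None, 3])  # one-hot labels
pos_weight = tf.constant([1.0, 2.0, 2.0])        # up-weight the rarer classes

loss = tf.reduce_mean(
    tf.nn.weighted_cross_entropy_with_logits(
        targets=targets, logits=logits, pos_weight=pos_weight))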
Code example #3
    def __init__(self,
                 h_max_length=20,
                 b_max_length=200,
                 trainable=False,
                 lstm_layers=2,
                 mlp_layers=1,
                 num_neurons=[128, 128, 32],
                 share_parameters=True,
                 average_pooling=False,
                 optimizer=tf.train.AdamOptimizer,
                 learning_rate=0.001,
                 batch_size=128,
                 activation=tf.nn.relu,
                 initializer=he_init,
                 num_epoch=40,
                 batch_norm_momentum=None,
                 dropout_rate=None,
                 max_check_without_progress=20,
                 show_progress=10,
                 tensorboard_logdir=None,
                 random_state=None,
                 embedding=None,
                 l2_lambda=0.01,
                 word_output_size=64,
                 sent_output_size=64,
                 vocab_size=None):
        LSTM.__init__(self, h_max_length, b_max_length, trainable, lstm_layers,
                      mlp_layers, num_neurons, share_parameters,
                      average_pooling, optimizer, learning_rate, batch_size,
                      activation, initializer, num_epoch, batch_norm_momentum,
                      dropout_rate, max_check_without_progress, show_progress,
                      tensorboard_logdir, random_state, embedding, l2_lambda,
                      vocab_size)

        self.word_output_size = word_output_size  # word-level encoder output size
        self.sent_output_size = sent_output_size  # sentence-level encoder output size
        self.vocab_size = vocab_size
        self.embedding_size = 100  # this variant uses fixed 100-d embeddings
        if self.embedding is None and self.vocab_size is None:
            raise ValueError("Either embedding or vocab_size must be set!")
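
Like the second variant, this one requires either a pre-trained embedding matrix or a vocab_size from which to train one. A minimal sketch of how such a constructor check typically resolves into an embedding variable in TF 1.x; build_embedding is an illustrative helper, not a function from the original code:

import numpy as np
import tensorflow as tf

def build_embedding(embedding, vocab_size, embedding_size=100):
    # Mirrors the constructor check above: one of the two must be given.
    if embedding is None and vocab_size is None:
        raise ValueError("Either embedding or vocab_size must be set!")
    if embedding is not None:
        # Initialize from a pre-trained matrix (shape [vocab, embedding_size]).
        init = tf.constant(np.asarray(embedding, dtype=np.float32))
        return tf.get_variable("embedding", initializer=init, trainable=False)
    # Otherwise learn an embedding table from scratch.
    return tf.get_variable(
        "embedding", [vocab_size, embedding_size],
        initializer=tf.random_uniform_initializer(-1.0, 1.0))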