def get_model(self, trainable=None):
    """Build and compile the stacked BiLSTM + TextCNN regression model.

    Two stacked bidirectional CuDNN LSTMs encode the embedded sequence;
    the encoder output is concatenated with the raw embeddings and fed
    through one conv/relu/max-pool branch per filter size in
    ``cfg.TEXT_CNN_filters``. Branch outputs are concatenated, passed
    through dropout and two dense layers to a single linear unit, and
    the model is compiled with MSE loss.

    Args:
        trainable: whether the embedding layer is trainable; falls back
            to ``self.trainable`` when ``None``.

    Returns:
        A compiled ``keras.Model``.
    """
    if trainable is None:
        trainable = self.trainable
    inputs = Input(shape=(self.max_len, ))
    embedded = get_embedding_layer(
        self.data.tokenizer,
        max_len=self.max_len,
        embedding_dim=self.embed_size,
        use_pretrained=self.use_pretrained,
        trainable=trainable,
        use_new_vector=self.use_new_vector)(inputs)
    encoded = Bidirectional(
        CuDNNLSTM(cfg.LSTM_hidden_size, return_sequences=True))(embedded)
    encoded = Bidirectional(
        CuDNNLSTM(cfg.LSTM_hidden_size, return_sequences=True))(encoded)
    # Residual-style merge: give the CNN branches both the RNN features
    # and the original embeddings along the feature axis.
    merged = concatenate([encoded, embedded], axis=2)
    branches = [self._conv_relu_maxpool(merged, filter_size)
                for filter_size in cfg.TEXT_CNN_filters]
    features = concatenate(branches, axis=1)
    features = Dropout(0.5)(features)
    features = Dense(5, activation='relu')(features)
    output = Dense(1, activation='linear')(features)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss='mse', optimizer=self.optimizer)
    return model
def get_model(self, trainable=None):
    """Build and compile a single-branch Conv1D regression model.

    Embeds the input tokens, applies one ``Conv1D`` (128 filters,
    window ``self.filter_window``) with ReLU, global-max-pools over
    time, and maps to a single linear output. Compiled with MSE loss.

    Args:
        trainable: whether the embedding layer is trainable; falls back
            to the instance-level default when ``None``.

    Returns:
        A compiled ``keras.Model``.
    """
    # Consistency fix: the sibling get_model variants resolve a None
    # `trainable` against self.trainable; this one forwarded None
    # verbatim. getattr keeps behavior identical if the owning class
    # has no `trainable` attribute (can't see the class from here).
    if trainable is None:
        trainable = getattr(self, 'trainable', None)
    inputs = Input(shape=(self.max_len, ))
    emb = get_embedding_layer(
        self.data.tokenizer,
        max_len=self.max_len,
        embedding_dim=self.embed_size,
        use_pretrained=self.use_pretrained,
        trainable=trainable,
        use_new_vector=self.use_new_vector)(inputs)
    x = Conv1D(128, self.filter_window, activation='relu')(emb)
    x = GlobalMaxPool1D()(x)
    x = Dense(1, activation='linear')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='mse', optimizer=self.optimizer)
    return model
def get_model(self, trainable=None):
    """Build and compile a BiGRU-with-attention regression model.

    Applies spatial dropout to the embeddings, encodes them with a
    bidirectional CuDNN GRU, pools with an ``Attention`` layer over the
    full sequence length, and maps through dropout and two dense layers
    to one linear output. Compiled with MSE loss.

    Args:
        trainable: whether the embedding layer is trainable; falls back
            to ``self.trainable`` when ``None``.

    Returns:
        A compiled ``keras.Model``.
    """
    if trainable is None:
        trainable = self.trainable
    inputs = Input(shape=(self.max_len,))
    embedded = get_embedding_layer(
        self.data.tokenizer,
        max_len=self.max_len,
        embedding_dim=self.embed_size,
        use_pretrained=self.use_pretrained,
        trainable=trainable,
        use_new_vector=self.use_new_vector)(inputs)
    # Drop whole embedding channels rather than single activations.
    embedded = SpatialDropout1D(0.5)(embedded)
    encoded = Bidirectional(
        CuDNNGRU(cfg.LSTM_hidden_size, return_sequences=True))(embedded)
    attended = Attention(self.max_len)(encoded)
    attended = Dropout(0.5)(attended)
    hidden = Dense(5, activation='relu')(attended)
    output = Dense(1, activation='linear')(hidden)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss='mse', optimizer=self.optimizer)
    return model
def get_model(self, trainable=None):
    """Build and compile a BiGRU regression model with 3D attention.

    Encodes the embedded sequence with a 128-unit bidirectional CuDNN
    GRU, re-weights the timesteps via ``self.attention_3d_block``,
    global-max-pools, and maps through a 128-unit dense layer to one
    linear output. Compiled with MSE loss.

    Args:
        trainable: whether the embedding layer is trainable; falls back
            to the instance-level default when ``None``.

    Returns:
        A compiled ``keras.Model``.
    """
    # Consistency fix: the sibling get_model variants resolve a None
    # `trainable` against self.trainable; this one forwarded None
    # verbatim. getattr keeps behavior identical if the owning class
    # has no `trainable` attribute (can't see the class from here).
    if trainable is None:
        trainable = getattr(self, 'trainable', None)
    inputs = Input(shape=(self.max_len, ))
    emb = get_embedding_layer(
        self.data.tokenizer,
        max_len=self.max_len,
        embedding_dim=self.embed_size,
        use_pretrained=self.use_pretrained,
        trainable=trainable,
        use_new_vector=self.use_new_vector)(inputs)
    x = Bidirectional(CuDNNGRU(128, return_sequences=True))(emb)
    x = self.attention_3d_block(x)
    x = GlobalMaxPool1D()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(1, activation='linear')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='mse', optimizer=self.optimizer)
    return model
def get_model(self, trainable=None):
    """Build and compile a TextCNN regression model.

    Runs the embedded sequence through one conv/relu/max-pool branch
    per filter size in ``self.filters``, concatenates the branch
    outputs, and maps through dropout and two dense layers to a single
    linear unit. Compiled with MSE loss.

    Args:
        trainable: whether the embedding layer is trainable; falls back
            to ``self.trainable`` when ``None``.

    Returns:
        A compiled ``keras.Model``.
    """
    if trainable is None:
        trainable = self.trainable
    inputs = Input(shape=(self.max_len, ))
    embedded = get_embedding_layer(
        self.data.tokenizer,
        max_len=self.max_len,
        embedding_dim=self.embed_size,
        use_pretrained=self.use_pretrained,
        trainable=trainable,
        use_new_vector=self.use_new_vector)(inputs)
    # One parallel conv branch per configured filter width.
    branches = [self._conv_relu_maxpool(embedded, filter_size)
                for filter_size in self.filters]
    features = concatenate(branches, axis=1)
    features = Dropout(0.5)(features)
    features = Dense(5, activation='relu')(features)
    output = Dense(1, activation='linear')(features)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss='mse', optimizer=self.optimizer)
    return model