def qualitative_CNN(self, vocab_size, emb_dim, max_len, nb_filters):
    """Build `self.qual_model`, a Graph that exposes the per-filter-size
    conv/pool activations of the trained CNN for qualitative inspection.

    The embedding weights and each convolution unit's weights are copied
    from the matching nodes of ``self.model``, so that model must already
    be built (and trained) before this is called.  The conv layers are
    also kept in ``self.qual_conv_set`` keyed by filter length.
    """
    self.max_len = max_len
    max_features = vocab_size
    filter_lengths = [3, 4, 5]

    print("Build model...")
    self.qual_model = Graph()
    self.qual_conv_set = {}

    # Embedding layer, initialised from the trained embedding node.
    self.qual_model.add_input(
        name='input', input_shape=(max_len,), dtype=int)
    self.qual_model.add_node(
        Embedding(max_features, emb_dim, input_length=max_len,
                  weights=self.model.nodes['sentence_embeddings'].get_weights()),
        name='sentence_embeddings', input='input')

    # One conv + max-pool branch per filter length; each branch is wired
    # to its own graph output so activations can be read out directly.
    for flen in filter_lengths:
        branch = Sequential()
        branch.add(Reshape(dims=(1, max_len, emb_dim),
                           input_shape=(max_len, emb_dim)))
        # Keep a handle on the conv layer for later inspection; weights
        # come from layers[1] of the trained 'unit_<flen>' node.
        self.qual_conv_set[flen] = Convolution2D(
            nb_filters, flen, emb_dim, activation="relu",
            weights=self.model.nodes['unit_' + str(flen)].layers[1].get_weights())
        branch.add(self.qual_conv_set[flen])
        # Pool over every valid window position -> one value per filter.
        branch.add(MaxPooling2D(pool_size=(max_len - flen + 1, 1)))
        branch.add(Flatten())

        self.qual_model.add_node(
            branch, name='unit_' + str(flen), input='sentence_embeddings')
        self.qual_model.add_output(
            name='output_' + str(flen), input='unit_' + str(flen))

    # Same {'output_3': 'mse', 'output_4': 'mse', 'output_5': 'mse'}
    # mapping as before, derived from filter_lengths.
    self.qual_model.compile(
        'rmsprop',
        {'output_' + str(flen): 'mse' for flen in filter_lengths})
def __init__(self, output_dimesion, vocab_size, dropout_rate, emb_dim,
             max_len, nb_filters, init_W=None):
    """Build the CNN Graph model used for document encoding.

    Architecture: embedding -> three parallel conv/max-pool units
    (filter lengths 3, 4, 5) -> 200-unit tanh dense -> dropout ->
    tanh projection of size ``output_dimesion`` -> single 'output' node,
    compiled with rmsprop on an MSE loss.

    init_W: optional pre-trained embedding matrix used to initialise the
    embedding layer (scaled by 1/20 — see note below).
    """
    self.max_len = max_len
    max_features = vocab_size
    vanila_dimension = 200
    projection_dimension = output_dimesion
    filter_lengths = [3, 4, 5]

    self.model = Graph()

    # Embedding layer (optionally seeded from a pre-trained matrix).
    self.model.add_input(name='input', input_shape=(max_len,), dtype=int)
    if init_W is None:
        embedding = Embedding(max_features, emb_dim, input_length=max_len)
    else:
        # NOTE(review): pre-trained weights are divided by 20 —
        # presumably an empirical scaling; confirm before changing.
        embedding = Embedding(max_features, emb_dim, input_length=max_len,
                              weights=[init_W / 20])
    self.model.add_node(embedding,
                        name='sentence_embeddings', input='input')

    # Parallel convolution + max-pooling units, one per filter length.
    for flen in filter_lengths:
        unit = Sequential()
        unit.add(Reshape(dims=(1, self.max_len, emb_dim),
                         input_shape=(self.max_len, emb_dim)))
        unit.add(Convolution2D(nb_filters, flen, emb_dim,
                               activation="relu"))
        # Pool over every valid window position -> one value per filter.
        unit.add(MaxPooling2D(pool_size=(self.max_len - flen + 1, 1)))
        unit.add(Flatten())
        self.model.add_node(unit, name='unit_' + str(flen),
                            input='sentence_embeddings')

    # Dense layer over the concatenated units, then dropout.
    self.model.add_node(
        Dense(vanila_dimension, activation='tanh'),
        name='fully_connect',
        inputs=['unit_' + str(flen) for flen in filter_lengths])
    self.model.add_node(Dropout(dropout_rate),
                        name='dropout', input='fully_connect')

    # Projection layer and the single graph output.
    self.model.add_node(Dense(projection_dimension, activation='tanh'),
                        name='projection', input='dropout')
    self.model.add_output(name='output', input='projection')

    # Loss is applied externally to the single 'output' node.
    self.model.compile('rmsprop', {'output': 'mse'})