    def build_model(self, metadata_path=None, embedding_weights=None):
        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)
        self.embedded_text = tf.nn.embedding_lookup(self.embedding_weights,
                                                    self.input)

        self.embedded_sentences, _ = ops.embed_sentences(
            self.sentences, self.embedding_weights)

        with tf.name_scope("CNN_LSTM"):
            self.cnn_out = ops.multi_filter_conv_block(
                self.embedded_text,
                self.args["n_filters"],
                dropout_keep_prob=self.args["dropout"])
            self.lstm_out = ops.lstm_block(
                self.cnn_out,
                self.args["hidden_units"],
                dropout=self.args["dropout"],
                layers=self.args["rnn_layers"],
                dynamic=False,
                bidirectional=self.args["bidirectional"])
            self.concat_features_sent_words = tf.concat(
                [self.lstm_out, self.embedded_sentences], axis=-1)
            self.out = tf.squeeze(
                fully_connected(self.concat_features_sent_words,
                                1,
                                activation='sigmoid'))

        with tf.name_scope("loss"):
            self.loss = losses.mean_squared_error(self.sentiment, self.out)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        #### Evaluation Measures.
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.out, self.sentiment, name="pearson")
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.sentiment, self.out, name="mse")
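
The `ops.multi_filter_conv_block` and `ops.lstm_block` helpers are defined elsewhere in the project and are not shown on this page. As a rough idea of what the convolutional block is doing, here is a minimal TF1-style sketch that runs several filter widths in parallel over the embedded text and concatenates the feature maps; the filter sizes, padding, and dropout placement are assumptions, not the actual Ovation implementation:

import tensorflow as tf

def multi_filter_conv_block_sketch(embedded_text, n_filters,
                                   filter_sizes=(3, 4, 5),
                                   dropout_keep_prob=1.0):
    """Hypothetical stand-in for ops.multi_filter_conv_block.

    embedded_text: [batch, time, embedding_dim] float tensor.
    Returns a [batch, time, n_filters * len(filter_sizes)] tensor.
    """
    branches = []
    for width in filter_sizes:
        # 1-D convolution over the time axis; 'same' padding keeps the sequence
        # length so the branches can be concatenated along the feature axis.
        conv = tf.layers.conv1d(embedded_text, filters=n_filters,
                                kernel_size=width, padding='same',
                                activation=tf.nn.relu)
        branches.append(conv)
    features = tf.concat(branches, axis=-1)
    return tf.nn.dropout(features, keep_prob=dropout_keep_prob)
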
Code example #2
    def build_model(self, metadata_path=None, embedding_weights=None):
        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)
        self.embedded = tf.nn.embedding_lookup(self.embedding_weights,
                                               self.input)

        self.lstm_out = ops.lstm_block(
            self.embedded,
            self.args["hidden_units"],
            dropout=self.args["dropout"],
            layers=self.args["rnn_layers"],
            dynamic=False,
            bidirectional=self.args["bidirectional"])

        self.dense1 = fully_connected(self.lstm_out, 128)
        dropped_out = dropout(self.dense1, keep_prob=0.8)

        self.dense2 = fully_connected(dropped_out, 128)
        dropped_out = dropout(self.dense2, keep_prob=0.8)

        self.out = tf.squeeze(fully_connected(dropped_out, 1))

        with tf.name_scope("loss"):
            #self.loss = self.cost()
            self.loss = losses.mean_squared_error(self.input_sim, self.out)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.out, self.input_sim, name="pearson")

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.input_sim, self.out, name="mse")
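
`tf.metrics.mean_squared_error` and `tf.contrib.metrics.streaming_pearson_correlation` are streaming metrics: each returns a `(value, update_op)` pair backed by local variables, so they only report meaningful numbers after the local variables have been initialized and the update ops have been run on some batches. A self-contained toy example of that pattern (the placeholder names are made up for the demonstration):

import tensorflow as tf

# Toy demonstration of the (value, update_op) metric pattern used above.
predictions = tf.placeholder(tf.float32, [None], name="predictions")
labels = tf.placeholder(tf.float32, [None], name="labels")
mse, mse_update = tf.metrics.mean_squared_error(labels, predictions, name="mse")

with tf.Session() as sess:
    # The metric accumulators are *local* variables, not global ones.
    sess.run(tf.local_variables_initializer())
    for preds, gold in [([0.1, 0.9], [0.0, 1.0]), ([0.4, 0.6], [0.5, 0.5])]:
        sess.run(mse_update, feed_dict={predictions: preds, labels: gold})
    print(sess.run(mse))  # running MSE over everything fed so far
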
Code example #3
    def build_model(self, metadata_path=None, embedding_weights=None):
        with tf.name_scope("embedding"):
            self.embedding_weights, self.config = ops.embedding_layer(
                metadata_path, embedding_weights)
            self.embedded_text = tf.nn.embedding_lookup(
                self.embedding_weights, self.sentence)

        with tf.name_scope("CNN_LSTM"):
            self.cnn_out = ops.multi_filter_conv_block(
                self.embedded_text,
                self.args["n_filters"],
                dropout_keep_prob=self.args["dropout"])
            self.lstm_out = ops.lstm_block(
                self.cnn_out,
                self.args["hidden_units"],
                dropout=self.args["dropout"],
                layers=self.args["rnn_layers"],
                dynamic=False,
                bidirectional=self.args["bidirectional"])
            self.out = fully_connected(self.lstm_out, 5)

        with tf.name_scope("loss"):
            self.loss = losses.categorical_cross_entropy(
                self.sentiment, self.out)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        #### Evaluation Measures.
        with tf.name_scope("Graph_Accuracy"):
            self.correct_preds = tf.equal(tf.argmax(self.out, 1),
                                          tf.argmax(self.sentiment, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_preds,
                                                   tf.float32),
                                           name="accuracy")
Code example #4
    def build_model(self, metadata_path=None, embedding_weights=None):
        """
        This method builds the computation graph by adding layers of
        computations. It takes the metadata_path (of the dataset vocabulary)
        and a preloaded word2vec matrix and input and uses them (if not None)
        to initialize the Tensorflow variables. The metadata is used to
        visualize the word embeddings that are being trained using Tensorflow
        Projector. Additionally you can use any other tool to visualize them.
        https://www.tensorflow.org/versions/r0.12/how_tos/embedding_viz/
        :param metadata_path: Path to the metadata of the vocabulary. Refer
        to the datasets API
        https://github.com/mindgarage/Ovation/wiki/The-Datasets-API
        :param embedding_weights: the preloaded w2v matrix that corresponds
        to the vocabulary. Refer to https://github.com/mindgarage/Ovation/wiki/The-Datasets-API#what-does-a-dataset-object-have
        :return:
        """
        # Build the Embedding layer as the first layer of the model

        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)
        self.embedded_s1 = tf.nn.embedding_lookup(self.embedding_weights,
                                                  self.input_s1)
        self.embedded_s2 = tf.nn.embedding_lookup(self.embedding_weights,
                                                  self.input_s2)

        self.s1_cnn_out = ops.multi_filter_conv_block(
            self.embedded_s1,
            self.args["n_filters"],
            dropout_keep_prob=self.args["dropout"])
        self.s1_lstm_out = ops.lstm_block(
            self.s1_cnn_out,
            self.args["hidden_units"],
            dropout=self.args["dropout"],
            layers=self.args["rnn_layers"],
            dynamic=False,
            bidirectional=self.args["bidirectional"])
        # Second arm of the Siamese architecture (convolution and LSTM weights are reused)
        self.s2_cnn_out = ops.multi_filter_conv_block(
            self.embedded_s2,
            self.args["n_filters"],
            reuse=True,
            dropout_keep_prob=self.args["dropout"])
        self.s2_lstm_out = ops.lstm_block(
            self.s2_cnn_out,
            self.args["hidden_units"],
            dropout=self.args["dropout"],
            layers=self.args["rnn_layers"],
            dynamic=False,
            reuse=True,
            bidirectional=self.args["bidirectional"])
        self.distance = distances.exponential(self.s1_lstm_out,
                                              self.s2_lstm_out)
        # input_sim is the ground-truth similarity; distance is the model's prediction
        with tf.name_scope("loss"):
            self.loss = losses.mean_squared_error(self.input_sim,
                                                  self.distance)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.distance, self.input_sim, name="pearson")

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.input_sim, self.distance, name="mse")
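
`distances.exponential` is the similarity head of this Siamese network and is not reproduced here. One common formulation (as in Manhattan-LSTM style models) maps the L1 distance between the two encodings into (0, 1]; the sketch below follows that formulation and is an assumption, not necessarily what the Ovation `distances` module does:

import tensorflow as tf

def exponential_distance_sketch(encoded_s1, encoded_s2):
    """Hypothetical stand-in for distances.exponential.

    encoded_s1, encoded_s2: [batch, features] encodings from the two arms.
    Returns a [batch] similarity in (0, 1], where 1.0 means identical encodings.
    """
    l1 = tf.reduce_sum(tf.abs(encoded_s1 - encoded_s2), axis=1)
    return tf.exp(-l1)
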
Code example #5
    def build_model(self, metadata_path=None, embedding_weights=None):
        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)
        self.embedded = tf.nn.embedding_lookup(self.embedding_weights,
                                               self.input)

        self.embedded = tf.concat((self.embedded, self.augmented), axis=2)

        self.facts = ops.lstm_block(self.embedded,
                                    self.args["hidden_units"],
                                    dropout=self.args["dropout"],
                                    layers=self.args["rnn_layers"],
                                    dynamic=False,
                                    return_seq=True,
                                    return_state=False,
                                    bidirectional=self.args["bidirectional"])

        self.facts = tf.transpose(self.facts, perm=[1, 0, 2])

        self.attention_weights = tf.get_variable(
            "W",
            shape=[self.args['batch_size'], 2 * self.args['hidden_units']])
        # self.attention_weights = tf.parallel_stack([self.attention_weights] *
        #                                             self.args['batch_size'])

        self.attentions = []
        self.sentiment = self.attention_weights
        self.sentiment_memories = [self.sentiment]

        # memory module
        with tf.variable_scope(
                "memory", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> build episodic memory')

            # generate n_hops episodes
            prev_memory = self.sentiment

            for i in range(self.args['num_hops']):
                # get a new episode
                print('==> generating episode', i)
                episode, attn = ops.generate_episode(
                    prev_memory, self.sentiment, self.facts, i,
                    2 * self.args['hidden_units'], self.input_length,
                    self.args['embedding_dim'])
                self.attentions.append(attn)
                # untied weights for memory update
                with tf.variable_scope("hop_%d" % i):
                    prev_memory = tf.layers.dense(
                        tf.concat([prev_memory, episode, self.sentiment], 1),
                        2 * self.args['hidden_units'],
                        activation=tf.nn.relu)
                    self.sentiment_memories.append(prev_memory)
            self.output = prev_memory

        self.output = tf.squeeze(
            self.get_sentiment_score(self.output, self.sentiment))

        with tf.name_scope("loss"):
            self.loss = losses.mean_squared_error(self.input_sim, self.output)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.output, self.input_sim, name="pearson")

        # Compute some Evaluation Measures to keep track of the training process
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.input_sim, self.output, name="mse")
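
`ops.generate_episode` implements the episodic memory step and is not shown on this page. In the spirit of Dynamic Memory Networks, a minimal soft-attention version would score each fact against the current memory and question, softmax the scores over time, and return the attention-weighted sum of the facts. The gating features and the absence of a GRU-based update in the sketch below are assumptions:

import tensorflow as tf

def generate_episode_sketch(memory, question, facts, hop_index, hidden_size):
    """Hypothetical stand-in for ops.generate_episode (soft-attention variant).

    memory, question: [batch, hidden_size] tensors.
    facts:            [batch, time, hidden_size] tensor.
    Returns (episode [batch, hidden_size], attention [batch, time]).
    """
    with tf.variable_scope("attention_hop_%d" % hop_index):
        m = tf.expand_dims(memory, 1)    # [batch, 1, hidden]
        q = tf.expand_dims(question, 1)  # [batch, 1, hidden]
        # Interaction features between every fact and the memory/question.
        features = tf.concat([facts * q, facts * m,
                              tf.abs(facts - q), tf.abs(facts - m)], axis=2)
        hidden = tf.layers.dense(features, hidden_size, activation=tf.nn.tanh)
        scores = tf.squeeze(tf.layers.dense(hidden, 1), axis=2)  # [batch, time]
        attention = tf.nn.softmax(scores)
        episode = tf.reduce_sum(facts * tf.expand_dims(attention, 2), axis=1)
    return episode, attention
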
Code example #6
    def build_model(self, metadata_path=None, embedding_weights=None):
        # Transforms the `embedding_weights` data (which is just a numpy array) into a
        # tf.Variable() object (or, if `embedding_weights` is None, creates a new
        # randomly initialized tf.Variable())
        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)

        # Transforms `self.input` from a list of token ids into a list of word vectors.
        # The output shape is Batch x Time x Word_Vector.
        self.embedded_input = tf.nn.embedding_lookup(self.embedding_weights,
                                                     self.input)

        # Generate a random fixed vector
        self.fixed_vec = tf.get_variable("fixed_vec", [128], trainable=False)
        self.fixed_vec = tf.parallel_stack([self.fixed_vec] *
                                           self.args.get("sequence_length"))
        new_fixed_vec = self.fixed_vec

        for i in range(1):
            # Concatenate the fixed vector with each word vector
            input_and_fixed = concatenate_matrices(new_fixed_vec,
                                                   self.embedded_input, 64)

            # Apply a softmax in each sequence (i.e., in each element of the batch)
            self.softmaxed_sequences = []
            self.rescaled_sequences = []
            for j, item in enumerate(input_and_fixed):
                sequence = tf.stack(input_and_fixed[j])

                # The Dense layer expects Batch x Input. I am fooling it into believing that
                # it got a batch, and it will process each word separately, which is what I
                # want.
                fc_out = tf.layers.dense(sequence, 1)
                softmaxed_seq = tf.nn.softmax(fc_out)
                self.softmaxed_sequences.append(softmaxed_seq)

                rescaled_seq = tf.multiply(sequence, softmaxed_seq)
                self.rescaled_sequences.append(rescaled_seq)

            self.rescaled_sequences = tf.stack(self.rescaled_sequences)

            # For now, just hardcoding values here
            self.lstm_out = ops.lstm_block(self.rescaled_sequences,
                                           hidden_units=128,
                                           dropout=0.5,
                                           layers=1,
                                           dynamic=False,
                                           bidirectional=True)

            self.loop_dense = tf.layers.dense(self.lstm_out,
                                              128,
                                              activation=tf.nn.sigmoid)
            new_fixed_vec = self.loop_dense

        self.final_dense = tf.layers.dense(self.loop_dense,
                                           1,
                                           activation=tf.nn.sigmoid)
        self.out = tf.squeeze(self.final_dense, 1)

        with tf.name_scope("loss"):
            self.loss = losses.mean_squared_error(self.expected_output,
                                                  self.out)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        #### Evaluation Measures.
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.out, self.expected_output, name="pearson")
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.expected_output, self.out, name="mse")
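
As the comments at the top of this last snippet describe, `ops.embedding_layer` essentially wraps the preloaded word2vec matrix (or a random matrix when none is given) in a `tf.Variable` and prepares the Projector metadata. A minimal sketch of that behaviour is shown below; the default vocabulary size, embedding dimension, and the omitted Projector configuration are assumptions:

import numpy as np
import tensorflow as tf

def embedding_layer_sketch(metadata_path=None, embedding_weights=None,
                           vocab_size=10000, embedding_dim=300):
    """Hypothetical stand-in for ops.embedding_layer."""
    if embedding_weights is not None:
        # Reuse the shape of the pretrained matrix and initialize from it.
        vocab_size, embedding_dim = embedding_weights.shape
        initializer = tf.constant_initializer(
            np.asarray(embedding_weights, dtype=np.float32))
    else:
        # No pretrained matrix: fall back to a random uniform initialization.
        initializer = tf.random_uniform_initializer(-0.1, 0.1)
    weights = tf.get_variable("embedding_weights",
                              shape=[vocab_size, embedding_dim],
                              initializer=initializer)
    # A real implementation would build a projector config pointing at
    # metadata_path here; returning None keeps the sketch minimal.
    config = None
    return weights, config
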