def run_epoch(self, sess, data_x, data_y, len_list, verbose=10):
        """Train the model for a single epoch.

        Args:
            sess: tf.Session() object.
            data_x: input data of shape (data_num, num_steps).
            data_y: labels of shape (data_num, class_num).
            len_list: sequence lengths for data_x, shape (data_num).
            verbose: print progress every `verbose` steps (falsy disables).

        Returns:
            average_loss: scalar. Average minibatch loss of model on epoch.
        """
        # First pass over the iterator only counts minibatches (for display).
        total_steps = len(list(helper.data_iter(data_x, data_y, len_list,
                                                self.config.batch_size)))
        losses = []
        batches = helper.data_iter(data_x, data_y, len_list,
                                   self.config.batch_size)
        for step, (batch_x, batch_y, batch_len) in enumerate(batches):
            feed = self.create_feed_dict(batch_x, batch_len, batch_y)
            fetches = [self.train_op, self.loss, self.learning_rate]
            _, batch_loss, lr = sess.run(fetches, feed_dict=feed)
            losses.append(batch_loss)
            if verbose and step % verbose == 0:
                recent = np.mean(losses[-verbose:])
                sys.stdout.write('\r{} / {} : loss = {}, lr = {}'.format(
                    step, total_steps, recent, lr))
                sys.stdout.flush()
        return np.mean(losses)
# Example #2
# 0
    def predict(self, sess, input_data, verbose=None):
        """Run iterative reordering inference over `input_data`.

        For each minibatch the predicted ordering is refined for
        `self.config.processing_step` rounds; results of the final round
        are collected.  Assumes processing_step >= 1 — TODO confirm.

        Args:
            sess: tf.Session() object.
            input_data: dataset accepted by helper.data_iter.
            verbose: unused; kept for interface symmetry.

        Returns:
            Tuple of (preds, true_label, lengths) accumulated over all
            minibatches, each a flat Python list.
        """
        all_preds = []
        all_labels = []
        all_lengths = []
        for batch, initial_order in helper.data_iter(input_data,
                                                     self.config.batch_size):
            order = initial_order
            pred = None
            labels = None
            dec_num = None
            for _ in range(self.config.processing_step):
                (enc_batch, labels, enc_num, dec_num,
                 seq_len) = helper.shuffleData(batch, order, self.vocab)
                feed = self.create_feed_dict(enc_batch, seq_len, enc_num,
                                             labels, dec_num)
                pred = sess.run(self.prediction, feed_dict=feed).tolist()
                # Feed this round's prediction back in as the next ordering.
                order = helper.reorder(order, pred, dec_num)

            all_preds.extend(pred)
            all_labels.extend(labels.tolist())
            all_lengths.extend(dec_num)

        return all_preds, all_labels, all_lengths
# Example #3
# 0
    def fit(self, sess, input_data, verbose=None):
        """Run one validation/test epoch and return the average loss.

        Args:
            sess: tf.Session() object.
            input_data: tuple of (encode_input, decode_input, decode_label).
            verbose: unused; kept for interface symmetry with run_epoch.

        Returns:
            avg_loss: scalar. Average minibatch loss of model on epoch.
        """
        batch_losses = []
        for batch, initial_order in helper.data_iter(input_data,
                                                     self.config.batch_size):
            order = initial_order
            round_losses = []
            # Refine the predicted ordering for a fixed number of rounds,
            # tracking the loss of each round.
            for _ in range(self.config.processing_step):
                (enc_batch, labels, enc_num, dec_num,
                 seq_len) = helper.shuffleData(batch, order, self.vocab)
                feed = self.create_feed_dict(enc_batch, seq_len, enc_num,
                                             labels, dec_num)
                loss, pred = sess.run([self.loss, self.prediction],
                                      feed_dict=feed)
                order = helper.reorder(order, pred.tolist(), dec_num)
                round_losses.append(loss)
            batch_losses.append(np.mean(round_losses))
        return np.mean(batch_losses)
# Example #4
# 0
    def run_epoch(self, sess, input_data, verbose=None):
        """Run one training epoch of the iterative reordering model.

        Args:
            sess: tf.Session() object.
            input_data: tuple of (encode_input, decode_input, decode_label).
            verbose: print progress every `verbose` steps (falsy disables).

        Returns:
            avg_loss: scalar. Average minibatch loss of model on epoch.
        """
        num_batches = len(input_data) // self.config.batch_size
        epoch_losses = []
        batch_stream = helper.data_iter(input_data, self.config.batch_size)
        for step, (batch, initial_order) in enumerate(batch_stream):
            order = initial_order
            round_losses = []
            # Refine the predicted ordering for a fixed number of rounds.
            for _ in range(self.config.processing_step):
                (enc_batch, labels, enc_num, dec_num,
                 seq_len) = helper.shuffleData(batch, order, self.vocab)
                feed = self.create_feed_dict(enc_batch, seq_len, enc_num,
                                             labels, dec_num)
                _, loss, lr, pred = sess.run(
                    [self.train_op, self.loss, self.learning_rate,
                     self.prediction],
                    feed_dict=feed)
                order = helper.reorder(order, pred.tolist(), dec_num)
                round_losses.append(loss)
            epoch_losses.append(np.mean(round_losses))
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}, lr = {}'.format(
                    step, num_batches, np.mean(epoch_losses[-verbose:]), lr))
                sys.stdout.flush()
        sys.stdout.write('\n')
        return np.mean(epoch_losses)
    def run_epoch(self, sess, data, verbose=10):
        """Train the model for a single epoch over `data`.

        Args:
            sess: tf.Session() object.
            data: training set; must support len() and be consumable by
                helper.data_iter.  # NOTE(review): element structure is
                opaque here — create_feed_dict defines what a batch holds.
            verbose: print progress every `verbose` steps (falsy disables).

        Returns:
            average_loss: scalar. Average minibatch loss of model on epoch.
        """
        num_batches = len(data) // self.config.batch_size
        losses = []

        for step, batch in enumerate(
                helper.data_iter(data, self.config.batch_size)):
            feed = self.create_feed_dict(batch, train_mode=True)
            _, loss, lr = sess.run(
                [self.train_op, self.loss, self.learning_rate],
                feed_dict=feed)
            losses.append(loss)
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}, lr = {}'.format(
                    step, num_batches, np.mean(losses[-verbose:]), lr))
                sys.stdout.flush()
        return np.mean(losses)
# Example #6
# 0
    def run_epoch(self, sess, input_data, verbose=None):
        """Train the model for a single epoch.

        Args:
            sess: tf.Session() object.
            input_data: tuple of (encode_input, decode_input, decode_label).
            verbose: print progress every `verbose` steps (falsy disables).

        Returns:
            avg_loss: scalar. Average minibatch loss of model on epoch.
        """
        num_batches = len(input_data[0]) // self.config.batch_size
        losses = []
        # data_iter takes the unpacked data tuple plus batch size and vocab.
        iter_args = input_data + (self.config.batch_size, self.vocab)
        for step, (batch, length_pair) in enumerate(
                helper.data_iter(*iter_args)):
            feed = self.create_feed_dict(batch, length_pair[0], length_pair[1])
            _, loss, lr = sess.run(
                [self.train_op, self.loss, self.learning_rate],
                feed_dict=feed)
            losses.append(loss)
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}, lr = {}'.format(
                    step, num_batches, np.mean(losses[-verbose:]), lr))
                sys.stdout.flush()
        sys.stdout.write('\n')
        return np.mean(losses)
 def fit(self, sess, data):
     """Run one evaluation pass over `data` and return the average loss.

     No training op is run; the model is evaluated with train_mode=False.

     Args:
         sess: tf.Session() object.
         data: dataset consumable by helper.data_iter, batched with
             self.config.batch_size.

     Returns:
         Scalar: mean minibatch loss over the whole pass.
     """
     # Removed unused local `data_len` (was computed but never read).
     total_loss = []
     for data_batch in helper.data_iter(data, self.config.batch_size):
         feed_dict = self.create_feed_dict(data_batch, train_mode=False)
         loss = sess.run(self.loss, feed_dict=feed_dict)
         total_loss.append(loss)
     return np.mean(total_loss)
 def ttt_predict(self, sess, data):
     """Make predictions from the provided model, one example at a time.

     Iterates `data` with batch size 1 and collects the per-class
     probability output for every example.

     Args:
         sess: tf.Session() object.
         data: input examples consumable by helper.data_iter; each batch
             here holds a single example (batch size is hard-coded to 1).
     Returns:
         ret_pred_prob: ndarray of prediction probabilities, one row per
             example, concatenated along axis 0 (shape: data_num x class_num
             presumably — verify against self.predict_prob).
         ret_data_batch: list of the individual batches exactly as yielded
             by helper.data_iter, aligned with the rows of ret_pred_prob.
     """
     ret_pred_prob = []
     ret_data_batch = []
     # Batch size 1: evaluate each example independently.
     for data_batch in helper.data_iter(data, 1):
         feed_dict = self.create_feed_dict(data_batch, train_mode=False)
         pred_prob = sess.run(self.predict_prob, feed_dict=feed_dict)
         ret_pred_prob.append(pred_prob)
         ret_data_batch.append(data_batch)
     # Stack the per-example probability rows into one array.
     ret_pred_prob = np.concatenate(ret_pred_prob, axis=0)
     return ret_pred_prob, ret_data_batch  # (data_num, class_num), list of batches