Code example #1
File: LASNet.py Project: nicozorza/speech-to-text
    def run_epoch(self,
                  session,
                  features,
                  labels,
                  batch_size,
                  epoch,
                  use_tensorboard,
                  tensorboard_writer,
                  feed_dict=None,
                  train_flag=True):
        loss_ep = 0
        ler_ep = 0
        n_step = 0

        database = list(zip(features, labels))

        for batch in self.create_batch(database, batch_size):
            batch_features, batch_labels = zip(*batch)

            # Padding input to max_time_step of this batch
            batch_train_features, batch_train_seq_len = padSequences(
                batch_features)
            batch_train_labels, batch_train_labels_len = padSequences(
                batch_labels, dtype=np.int64, value=LASLabel.UNK_INDEX)

            input_feed_dict = {
                self.input_features: batch_train_features,
                self.input_features_length: batch_train_seq_len,
                self.input_labels: batch_train_labels,
                self.input_labels_length: batch_train_labels_len
            }

            if feed_dict is not None:
                input_feed_dict = {**input_feed_dict, **feed_dict}

            if use_tensorboard:
                s = session.run(self.merged_summary, feed_dict=input_feed_dict)
                tensorboard_writer.add_summary(s, epoch)
                use_tensorboard = False  # Only on one batch

            if train_flag:
                loss, _, ler = session.run(
                    [self.loss, self.train_op, self.ler],
                    feed_dict=input_feed_dict)
            else:
                loss, ler = session.run([self.loss, self.ler],
                                        feed_dict=input_feed_dict)

            loss_ep += loss
            ler_ep += ler
            n_step += 1
        return loss_ep / n_step, ler_ep / n_step
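The padSequences helper used throughout these examples is not shown on this page. A minimal re-implementation, consistent with how it is called above (the project's actual version may differ), could look like this:

import numpy as np

def padSequences(sequences, dtype=np.float32, value=0.0):
    """Pad a list of [time, ...] arrays to the longest sequence in the list.

    Returns the padded [batch, max_time, ...] array and the original lengths,
    matching the (padded, seq_len) pairs unpacked in the examples above.
    """
    lengths = np.asarray([len(s) for s in sequences], dtype=np.int64)
    max_len = lengths.max()
    trailing_dims = np.asarray(sequences[0]).shape[1:]
    padded = np.full((len(sequences), max_len) + trailing_dims, value, dtype=dtype)
    for i, seq in enumerate(sequences):
        padded[i, :len(seq)] = seq
    return padded, lengths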
Code example #2
    def validate(self,
                 input_seq,
                 output_seq,
                 show_partial: bool = True,
                 batch_size: int = 1):
        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            sess.run(tf.global_variables_initializer())
            self.load_checkpoint(sess)

            acum_err = 0
            acum_loss = 0
            n_step = 0
            database = list(zip(input_seq, output_seq))
            batch_list = self.create_batch(database, batch_size)
            for batch in batch_list:
                in_seq, out_seq = zip(*batch)
                # Padding input to max_time_step of this batch
                batch_in_seq, batch_seq_len = padSequences(in_seq)
                batch_out_seq, _ = padSequences(out_seq)

                feed_dict = {
                    self.input_feature: batch_in_seq,
                    # self.seq_len: batch_seq_len,
                    self.output_feature: batch_out_seq,
                    self.tf_is_traing_pl: False
                }
                error, loss = sess.run([self.reconstruction_loss, self.loss],
                                       feed_dict=feed_dict)

                if show_partial:
                    print("Batch %d of %d, error %f" %
                          (n_step + 1, len(batch_list), error))
                acum_err += error
                acum_loss += loss
                n_step += 1
            print("Validation error: %f, loss: %f" %
                  (acum_err / n_step, acum_loss / n_step))

            sess.close()

            # Average per batch, matching the printed validation summary
            return acum_err / n_step, acum_loss / n_step
Code example #3
File: LASNet.py Project: nicozorza/speech-to-text
    def predict(self, feature):
        feature = np.reshape(feature, [1, len(feature), np.shape(feature)[1]])
        with tf.Session(graph=self.graph) as sess:
            sess.run(tf.global_variables_initializer())
            self.load_checkpoint(sess)

            features, seq_len = padSequences(feature)

            feed_dict = {
                self.input_features: features,
                self.input_features_length: seq_len,
                self.tf_is_traing_pl: False
            }

            predicted = sess.run(self.decoded_ids, feed_dict=feed_dict)

            # The with-block closes the session automatically
            return predicted[0]
Code example #4
    def validate(self,
                 features,
                 labels,
                 show_partial: bool = True,
                 batch_size: int = 1):
        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            sess.run(tf.global_variables_initializer())
            self.load_checkpoint(sess)

            acum_ler = 0
            acum_loss = 0
            n_step = 0
            database = list(zip(features, labels))
            batch_list = self.create_batch(database, batch_size)
            for batch in batch_list:
                feature, label = zip(*batch)
                # Padding input to max_time_step of this batch
                batch_features, batch_seq_len = padSequences(feature)

                # Converting to sparse representation so as to feed SparseTensor input
                batch_labels = sparseTupleFrom(label)
                feed_dict = {
                    self.input_feature: batch_features,
                    self.seq_len: batch_seq_len,
                    self.input_label: batch_labels,
                    self.tf_is_traing_pl: False
                }
                ler, loss = sess.run([self.ler, self.logits_loss],
                                     feed_dict=feed_dict)

                if show_partial:
                    print("Batch %d of %d, ler %f" %
                          (n_step + 1, len(batch_list), ler))
                acum_ler += ler
                acum_loss += loss
                n_step += 1
            print("Validation ler: %f, loss: %f" %
                  (acum_ler / n_step, acum_loss / n_step))

            sess.close()

            # Average per batch, matching the printed validation summary
            return acum_ler / n_step, acum_loss / n_step
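The ler averaged above is the label error rate. In TF1 CTC pipelines it is typically the edit distance between the decoded and reference sparse label tensors, normalized by reference length; the graph behind self.ler is not shown here, so this is an assumption. A self-contained toy illustration:

import tensorflow as tf

# Toy hypothesis vs. reference label batches as SparseTensors
decoded = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
targets = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[1, 3], dense_shape=[1, 2])

# tf.edit_distance normalizes by the reference length by default
ler = tf.reduce_mean(tf.edit_distance(decoded, targets))

with tf.Session() as sess:
    print(sess.run(ler))  # 0.5: one substitution out of two reference labels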
Code example #5
    def encode(self, feature):
        with tf.Session(graph=self.graph) as sess:
            sess.run(tf.global_variables_initializer())
            self.load_checkpoint(sess)

            # Padding input to max_time_step of this batch
            features, seq_len = padSequences([feature])

            feed_dict = {
                self.input_seq: features,
                self.seq_len: seq_len,
                self.tf_is_traing_pl: False
            }

            encoding = sess.run(self.encoder_out, feed_dict=feed_dict)

            # The with-block closes the session automatically
            return encoding[0]
Code example #6
    def predict(self, feature):
        feature = np.reshape(feature, [1, len(feature), np.shape(feature)[1]])
        with tf.Session(graph=self.graph) as sess:
            sess.run(tf.global_variables_initializer())
            self.load_checkpoint(sess)

            # Padding input to max_time_step of this batch
            features, seq_len = padSequences(feature)

            feed_dict = {
                self.input_feature: features,
                self.seq_len: seq_len,
                self.tf_is_traing_pl: False
            }

            predicted = sess.run(self.decoded, feed_dict=feed_dict)

            # The with-block closes the session automatically
            return self.decoderOutputToText(predicted)
Code example #7
File: ZorzNet.py Project: nicozorza/speech-to-text
    def run_epoch(self, session, features, labels, batch_size, epoch,
                  use_tensorboard, tensorboard_writer, feed_dict=None, train_flag=True):
        loss_ep = 0
        ler_ep = 0
        n_step = 0

        database = list(zip(features, labels))

        for batch in self.create_batch(database, batch_size):
            batch_features, batch_labels = zip(*batch)

            # Padding input to max_time_step of this batch
            batch_train_features, batch_train_seq_len = padSequences(batch_features)

            # Converting to sparse representation so as to feed SparseTensor input
            batch_train_labels = sparseTupleFrom(batch_labels)

            input_feed_dict = {
                self.input_features: batch_train_features,
                self.input_features_length: batch_train_seq_len,
                self.input_labels: batch_train_labels
            }

            if feed_dict is not None:
                input_feed_dict = {**input_feed_dict, **feed_dict}

            if use_tensorboard:
                s = session.run(self.merged_summary, feed_dict=input_feed_dict)
                tensorboard_writer.add_summary(s, epoch)
                use_tensorboard = False     # Only on one batch

            if train_flag:
                loss, _, ler = session.run([self.loss, self.train_op, self.ler], feed_dict=input_feed_dict)
            else:
                loss, ler = session.run([self.loss, self.ler], feed_dict=input_feed_dict)

            loss_ep += loss
            ler_ep += ler
            n_step += 1
        return loss_ep / n_step, ler_ep / n_step
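Like padSequences, the sparseTupleFrom helper is not shown on this page. The standard implementation for feeding a tf.sparse_placeholder, widely used in CTC examples (the project's version is presumably similar), is:

import numpy as np

def sparseTupleFrom(sequences, dtype=np.int32):
    """Build the (indices, values, dense_shape) triple of a tf.SparseTensor
    from a list of label sequences of varying length."""
    indices, values = [], []
    for batch_idx, seq in enumerate(sequences):
        indices.extend(zip([batch_idx] * len(seq), range(len(seq))))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    dense_shape = np.asarray([len(sequences), indices[:, 1].max() + 1],
                             dtype=np.int64)
    return indices, values, dense_shape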
Code example #8
    def train(self,
              train_features,
              train_labels,
              batch_size: int,
              training_epochs: int,
              restore_run: bool = True,
              save_partial: bool = True,
              save_freq: int = 10,
              shuffle: bool = True,
              use_tensorboard: bool = False,
              tensorboard_freq: int = 50):

        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            sess.run(tf.global_variables_initializer())

            if restore_run:
                self.load_checkpoint(sess)

            train_writer = None
            if use_tensorboard:
                train_writer = self.create_tensorboard_writer(
                    self.network_data.tensorboard_path + '/train', self.graph)
                train_writer.add_graph(sess.graph)

            loss_ep = 0
            ler_ep = 0
            for epoch in range(training_epochs):
                epoch_time = time.time()
                loss_ep = 0
                ler_ep = 0
                n_step = 0

                database = list(zip(train_features, train_labels))

                for batch in self.create_batch(database, batch_size):
                    batch_features, batch_labels = zip(*batch)

                    # Padding input to max_time_step of this batch
                    batch_train_features, batch_train_seq_len = padSequences(
                        batch_features)

                    # Converting to sparse representation so as to feed SparseTensor input
                    batch_train_labels = sparseTupleFrom(batch_labels)

                    feed_dict = {
                        self.input_feature: batch_train_features,
                        self.seq_len: batch_train_seq_len,
                        self.input_label: batch_train_labels
                    }

                    loss, _, ler = sess.run(
                        [self.loss, self.training_op, self.ler],
                        feed_dict=feed_dict)

                    loss_ep += loss
                    ler_ep += ler
                    n_step += 1
                loss_ep = loss_ep / n_step
                ler_ep = ler_ep / n_step

                if use_tensorboard:
                    if epoch % tensorboard_freq == 0 and self.network_data.tensorboard_path is not None:

                        random_index = random.randint(0,
                                                      len(train_features) - 1)
                        feature = [train_features[random_index]]
                        label = [train_labels[random_index]]

                        # Padding input to max_time_step of this batch
                        tensorboard_features, tensorboard_seq_len = padSequences(
                            feature)

                        # Converting to sparse representation so as to feed SparseTensor input
                        tensorboard_labels = sparseTupleFrom(label)
                        tensorboard_feed_dict = {
                            self.input_feature: tensorboard_features,
                            self.seq_len: tensorboard_seq_len,
                            self.input_label: tensorboard_labels
                        }
                        s = sess.run(self.merged_summary,
                                     feed_dict=tensorboard_feed_dict)
                        train_writer.add_summary(s, epoch)

                if save_partial:
                    if epoch % save_freq == 0:
                        self.save_checkpoint(sess)
                        self.save_model(sess)

                if shuffle:
                    aux_list = list(zip(train_features, train_labels))
                    random.shuffle(aux_list)
                    train_features, train_labels = zip(*aux_list)

                print(
                    "Epoch %d of %d, loss %f, ler %f, epoch time %.2fmin, remaining time %.2fmin"
                    % (epoch + 1, training_epochs, loss_ep, ler_ep,
                       (time.time() - epoch_time) / 60,
                       (training_epochs - epoch - 1) *
                       (time.time() - epoch_time) / 60))

            # save result
            self.save_checkpoint(sess)
            self.save_model(sess)

            sess.close()

            return ler_ep, loss_ep
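The remaining helper these examples rely on, create_batch, is also absent from this page. Judging from the call sites (its result is both iterated over and passed to len()), a plausible implementation slices the shuffled database into consecutive chunks:

def create_batch(database, batch_size):
    """Split a list of (feature, label) pairs into consecutive batches;
    the final batch may be smaller than batch_size."""
    return [database[i:i + batch_size]
            for i in range(0, len(database), batch_size)]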
Code example #9
    def train(self,
              input_seq,
              output_seq,
              batch_size: int,
              training_epochs: int,
              restore_run: bool = True,
              save_partial: bool = True,
              save_freq: int = 10,
              shuffle: bool = True,
              use_tensorboard: bool = False,
              tensorboard_freq: int = 50):

        with self.graph.as_default():
            sess = tf.Session(graph=self.graph)
            sess.run(tf.global_variables_initializer())

            if restore_run:
                self.load_checkpoint(sess)

            train_writer = None
            if use_tensorboard and self.network_data.tensorboard_path is not None:
                # Recreate the train summary directory so each run starts clean
                train_dir = self.network_data.tensorboard_path + '/train'
                if tf.gfile.Exists(train_dir):
                    tf.gfile.DeleteRecursively(train_dir)
                tf.gfile.MkDir(train_dir)

                train_writer = tf.summary.FileWriter(train_dir, self.graph)
                train_writer.add_graph(sess.graph)

            loss_ep = 0
            for epoch in range(training_epochs):
                epoch_time = time.time()
                loss_ep = 0
                n_step = 0

                database = list(zip(input_seq, output_seq))

                for batch in self.create_batch(database, batch_size):
                    batch_in_seq, batch_out_seq = zip(*batch)

                    # Padding input to max_time_step of this batch
                    batch_train_in_seq, batch_train_seq_len = padSequences(
                        batch_in_seq)
                    batch_train_out_seq, _ = padSequences(batch_out_seq)

                    feed_dict = {
                        self.input_seq: batch_train_in_seq,
                        self.seq_len: batch_train_seq_len,
                        self.output_seq: batch_train_out_seq
                    }

                    loss, _ = sess.run([self.loss, self.optimizer],
                                       feed_dict=feed_dict)

                    loss_ep += loss
                    n_step += 1
                loss_ep = loss_ep / n_step

                if use_tensorboard:
                    if epoch % tensorboard_freq == 0 and self.network_data.tensorboard_path is not None:

                        random_index = random.randint(0, len(input_seq) - 1)
                        in_seq = [input_seq[random_index]]
                        out_seq = [output_seq[random_index]]

                        # Padding input to max_time_step of this batch
                        tensorboard_in_seq, tensorboard_seq_len = padSequences(
                            in_seq)
                        tensorboard_out_seq, _ = padSequences(out_seq)

                        tensorboard_feed_dict = {
                            self.input_seq: tensorboard_in_seq,
                            self.seq_len: tensorboard_seq_len,
                            self.output_seq: tensorboard_out_seq
                        }
                        s = sess.run(self.merged_summary,
                                     feed_dict=tensorboard_feed_dict)
                        train_writer.add_summary(s, epoch)

                if save_partial:
                    if epoch % save_freq == 0:
                        self.save_checkpoint(sess)
                        self.save_model(sess)

                if shuffle:
                    aux_list = list(zip(input_seq, output_seq))
                    random.shuffle(aux_list)
                    input_seq, output_seq = zip(*aux_list)

                print(
                    "Epoch %d of %d, loss %f, epoch time %.2fmin, remaining time %.2fmin"
                    % (epoch + 1, training_epochs, loss_ep,
                       (time.time() - epoch_time) / 60,
                       (training_epochs - epoch - 1) *
                       (time.time() - epoch_time) / 60))

            # save result
            self.save_checkpoint(sess)
            self.save_model(sess)

            sess.close()

            return loss_ep