Example #1
0
    def train(self, train_set, valid_set, test_set, optimizer, epochs):
        fit = self.compile(train_set, optimizer)

        from layers import LSTM
        fit_state = []
        for layer in self.layers:
            if isinstance(layer, LSTM):
                fit_state.append(layer.h)
                fit_state.append(layer.c)

        val = self.compile(valid_set)
        val_state = []
        for layer in self.layers:
            if isinstance(layer, LSTM):
                val_state.append(layer.h)
                val_state.append(layer.c)

        for epoch in range(1, epochs + 1):
            progress_bar = TrainProgressBar(epoch, train_set.batches,
                                            valid_set.batches)

            # fit
            fit_results = []
            for batch in range(train_set.batches / 2):
                fit_results.append(fit(batch))
                progress_bar.fit_update(fit_results)

            # reset fit state
            for state in fit_state:
                state.set_value(state.get_value() * 0.)

            # validate
            val_results = []
            for batch in range(valid_set.batches):
                val_results.append(val(batch))
                progress_bar.val_update(val_results)

            # reset val state
            for state in val_state:
                state.set_value(state.get_value() * 0.)

            self.fit_results.append(fit_results)
            self.val_results.append(val_results)

            print

            # fit
            fit_results = []
            for batch in range(train_set.batches / 2, train_set.batches):
                fit_results.append(fit(batch))
                progress_bar.fit_update(fit_results)

            # reset fit state
            for state in fit_state:
                state.set_value(state.get_value() * 0.)

            # validate
            val_results = []
            for batch in range(valid_set.batches):
                val_results.append(val(batch))
                progress_bar.val_update(val_results)

            # reset val state
            for state in val_state:
                state.set_value(state.get_value() * 0.)

            self.fit_results.append(fit_results)
            self.val_results.append(val_results)

            import numpy as np
            if epoch >= self.decay_epoch:
                self.optimizer.lr.set_value(np.cast[np.float32](
                    self.optimizer.lr.get_value() * self.decay_rate))

            print 'lr', self.optimizer.lr.get_value()

        # static test
        test = self.compile(test_set)
        test_results = []
        for batch in range(test_set.batches):
            test_results.append(test(batch))
        import numpy as np
        print 'Static pp({})'.format(np.mean(test_results))

        # reset learning rate
        self.optimizer.lr.set_value(np.cast[np.float32](1.0))

        # dynamic test
        test = self.compile(test_set, optimizer)
        test_results = []
        for batch in range(test_set.batches):
            test_results.append(test(batch))
        import numpy as np
        print 'Dynamic pp({})'.format(np.mean(test_results))
Example #2
0
def test_progress_bar():
    # Smoke-test TrainProgressBar: construct one bar and drive both of its
    # update channels once with small (index, value) pair lists.
    from utils import TrainProgressBar
    epoch, fit_batches, val_batches = 1, 10, 10
    bar = TrainProgressBar(epoch, fit_batches, val_batches)
    bar.fit_update(zip(range(10), range(10)))
    bar.val_update(zip(range(5), range(5)))
Example #3
0
File: model.py — Project: MorLong/norm-rnn
    def train(self, train_set, valid_set, test_set, optimizer, epochs):
        fit = self.compile(train_set, optimizer)

        from layers import LSTM
        fit_state = []
        for layer in self.layers:
            if isinstance(layer, LSTM):
                fit_state.append(layer.h)
                fit_state.append(layer.c)

        val = self.compile(valid_set)
        val_state = []
        for layer in self.layers:
            if isinstance(layer, LSTM):
                val_state.append(layer.h)
                val_state.append(layer.c)

        for epoch in range(1, epochs + 1):
            progress_bar = TrainProgressBar(epoch, train_set.batches, valid_set.batches)

            # fit
            fit_results = []
            for batch in range(train_set.batches / 2):
                fit_results.append(fit(batch))
                progress_bar.fit_update(fit_results)

            # reset fit state
            for state in fit_state:
                state.set_value(state.get_value() * 0.)

            # validate
            val_results = []
            for batch in range(valid_set.batches):
                val_results.append(val(batch))
                progress_bar.val_update(val_results)

            # reset val state
            for state in val_state:
                state.set_value(state.get_value() * 0.)

            self.fit_results.append(fit_results)
            self.val_results.append(val_results)

            print

            # fit
            fit_results = []
            for batch in range(train_set.batches / 2, train_set.batches):
                fit_results.append(fit(batch))
                progress_bar.fit_update(fit_results)

            # reset fit state
            for state in fit_state:
                state.set_value(state.get_value() * 0.)

            # validate
            val_results = []
            for batch in range(valid_set.batches):
                val_results.append(val(batch))
                progress_bar.val_update(val_results)

            # reset val state
            for state in val_state:
                state.set_value(state.get_value() * 0.)


            self.fit_results.append(fit_results)
            self.val_results.append(val_results)

            import numpy as np
	    if epoch >= self.decay_epoch:
                self.optimizer.lr.set_value(np.cast[np.float32](self.optimizer.lr.get_value() * self.decay_rate))

            print 'lr', self.optimizer.lr.get_value()


        # static test
        test = self.compile(test_set)
        test_results = []
        for batch in range(test_set.batches):
            test_results.append(test(batch))
        import numpy as np
        print 'Static pp({})'.format(np.mean(test_results))

	# reset learning rate
        self.optimizer.lr.set_value(np.cast[np.float32](1.0))

        # dynamic test
        test = self.compile(test_set, optimizer)
        test_results = []
        for batch in range(test_set.batches):
            test_results.append(test(batch))
        import numpy as np
        print 'Dynamic pp({})'.format(np.mean(test_results))