Example #1
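# Imports assumed by this snippet; the project-local module paths below are
# hypothetical, since they are not shown in the original example.
import unittest
import numpy as np
from os import path
# from count_based_method import CountBasedMethod   # hypothetical path
# from rnnlm import RNNLM                            # hypothetical path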
class TestRNNLM(unittest.TestCase):
    def setUp(self):
        text = 'You said good-bye and I said hello.'
        cbm = CountBasedMethod()
        word_list = cbm.text_to_word_list(text)
        word_to_id, *_ = cbm.preprocess(word_list)
        vocab_size = len(word_to_id)
        wordvec_size = 100
        hidden_size  = 100
        self.rnnlm = RNNLM(vocab_size, wordvec_size, hidden_size)
        self.xs = np.array([
            [0, 4, 4, 1],
            [4, 0, 2, 1]
        ])
        self.ts = np.array([
            [0, 1, 0, 0],
            [0, 0, 0, 1]
        ])

    def test_predict(self):
        score = self.rnnlm._predict(self.xs)
        self.assertEqual((2, 4, 7), score.shape)

    def test_forward(self):
        loss = self.rnnlm.forward(self.xs, self.ts)
        self.assertEqual(1.94, round(loss, 2))

    def test_backward(self):
        self.rnnlm.forward(self.xs, self.ts)
        dout = self.rnnlm.backward()
        self.assertEqual(None, dout)

    def test_reset_state(self):
        self.rnnlm.forward(self.xs, self.ts)
        self.rnnlm.backward()
        self.assertEqual((2, 100), self.rnnlm.lstm_layer.h.shape)
        self.rnnlm.reset_state()
        self.assertEqual(None, self.rnnlm.lstm_layer.h)

    def test_save_params(self):
        self.rnnlm.forward(self.xs, self.ts)
        self.rnnlm.backward()
        self.rnnlm.save_params()
        self.assertEqual(True, path.exists('../pkl/rnnlm.pkl'))

    def test_load_params(self):
        self.rnnlm.load_params()
        a, b, c, d, e, f = self.rnnlm.params
        self.assertEqual((7, 100), a.shape)
        self.assertEqual((100, 400), b.shape)
        self.assertEqual((100, 400), c.shape)
        self.assertEqual((400,), d.shape)
        self.assertEqual((100, 7), e.shape)
        self.assertEqual((7,), f.shape)
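Example #2

The training snippet below refers to a corpus, a vocabulary and several hyperparameters that are defined earlier in the original script but are not shown here. A minimal sketch of that missing setup follows; the values, the load_data import and the helper module paths are assumptions, not the original's.

# Assumed setup (hypothetical values and import paths):
# from dataset import load_data                                 # hypothetical
# from rnnlm import RNNLM, RNNLMTrainer, SGD, eval_perplexity   # hypothetical
batch_size = 20         # assumed
wordvec_size = 100      # assumed
hidden_size = 100       # assumed
time_size = 35          # assumed truncated-BPTT length
learning_rate = 20.0    # assumed
max_epoch = 4           # assumed
max_grad = 0.25         # assumed gradient-clipping threshold
corpus, word_to_id, id_to_word = load_data('train')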
corpus_test, *_ = load_data('test')
vocab_size = len(word_to_id)
xs = corpus[:-1]
ts = corpus[1:]

# Create the model, optimiser and trainer
model = RNNLM(vocab_size, wordvec_size, hidden_size)
optimiser = SGD(learning_rate)
trainer = RNNLMTrainer(model, optimiser)

# 1. Train, applying gradient clipping
training_process = trainer.fit(xs,
                               ts,
                               max_epoch,
                               batch_size,
                               time_size,
                               max_grad,
                               eval_interval=20)
for iter in training_process:
    print(iter)
file_path = '../img/train_rnnlm.png'
trainer.save_plot_image(file_path, ylim=(0, 500))

# 2. Evaluate on the test data
model.reset_state()
ppl_test = eval_perplexity(model, corpus_test)
print('Test perplexity: ', ppl_test)

# 3. Save parameters
model.save_params()
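
Once saved, the parameters can presumably be restored into a fresh model with load_params(), as exercised in the tests of Example #1. A minimal sketch, assuming the same RNNLM and eval_perplexity helpers as above:

# Restore the saved parameters and re-evaluate (sketch).
model = RNNLM(vocab_size, wordvec_size, hidden_size)
model.load_params()   # presumably reads the pickle written by save_params()
model.reset_state()
print('Test perplexity (reloaded): ', eval_perplexity(model, corpus_test))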
Example #3
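# This example is Python 2 / Theano-era code (print statements, xrange).
# Imports it appears to rely on are listed below; the project-local helpers
# named in the trailing comment are assumed to come from the project's own
# modules (their import paths are not shown in the original):
import os
import time
import copy
import cPickle as pkl
import numpy
# TextIterator, RNNLM, ModelTrainer, TextSampler, prepare_data, pred_probs
# and f_log_probs are provided elsewhere in the project.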
def train(train_path,
          validation_path,
          dictionary_path,
          model_path,
          reload_state=False,
          dim_word=100, # word vector dimensionality
          dim=1000, # the number of LSTM units
          encoder='lstm',
          patience=10,
          max_epochs=5000,
          dispFreq=100,
          decay_c=0., 
          alpha_c=0., 
          diag_c=0.,
          lrate=0.01, 
          n_words=100000,
          maxlen=100, # maximum length of the description
          optimizer='rmsprop', 
          batch_size = 16,
          valid_batch_size = 16,
          validFreq=1000,
          saveFreq=1000, # save the parameters after every saveFreq updates
          sampleFreq=100, # generate some text samples after every sampleFreq updates
          profile=False):

    # Model options
    model_options = locals().copy()

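    # Build word<->index dictionaries from the vocabulary file; indices start
    # at 2, presumably leaving 0 and 1 reserved (e.g. end-of-sentence and UNK).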
    worddicts = dict()
    worddicts_r = dict()
    with open(dictionary_path, 'rb') as f:
        for (i, line) in enumerate(f):
            word = line.strip()
            code = i + 2
            worddicts_r[code] = word
            worddicts[word] = code

    # reload options
    if reload_state and os.path.exists(model_path):
        with open('%s.pkl' % model_path, 'rb') as f:
            model_options = pkl.load(f)

    print '### Loading data.'

    train = TextIterator(train_path, 
                         worddicts,
                         n_words_source=n_words, 
                         batch_size=batch_size,
                         maxlen=maxlen)
    valid = TextIterator(validation_path, 
                         worddicts,
                         n_words_source=n_words, 
                         batch_size=valid_batch_size,
                         maxlen=maxlen)

    print '### Building neural network.'

    rnnlm = RNNLM(model_options)
    trainer = ModelTrainer(rnnlm, optimizer, model_options)
    sampler = TextSampler(rnnlm, model_options)

    print '### Training neural network.'

    best_params = None
    bad_counter = 0

    if validFreq == -1:
        validFreq = len(train[0])/batch_size
    if saveFreq == -1:
        saveFreq = len(train[0])/batch_size
    if sampleFreq == -1:
        sampleFreq = len(train[0])/batch_size

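    # uidx counts parameter updates across epochs; estop flags early stopping.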
    uidx = 0
    estop = False
    for eidx in xrange(max_epochs):
        n_samples = 0

        for x in train:
            n_samples += len(x)
            uidx += 1

            x, x_mask = prepare_data(x, maxlen=maxlen, n_words=n_words)

            if x is None:
                print 'Minibatch with zero samples under length ', maxlen
                uidx -= 1
                continue

            ud_start = time.time()
            cost = trainer.f_grad_shared(x, x_mask)
            trainer.f_update(lrate)
            ud = time.time() - ud_start

            if numpy.isnan(cost) or numpy.isinf(cost):
                print 'NaN or Inf detected'
                return 1., 1., 1.

            if numpy.mod(uidx, dispFreq) == 0:
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud

            if numpy.mod(uidx, saveFreq) == 0:
                # Save the best parameters, or the current state if best_params
                # is None.
                rnnlm.save_params(best_params)
                # Save the training options.
                pkl.dump(model_options, open('%s.pkl' % model_path, 'wb'))

            if numpy.mod(uidx, sampleFreq) == 0:
                # FIXME: random selection?
                for jj in xrange(5):
                    sample, score = sampler.generate()
                    print 'Sample ', jj, ': ',
                    ss = sample
                    for vv in ss:
                        if vv == 0:
                            break
                        if vv in worddicts_r:
                            print worddicts_r[vv], 
                        else:
                            print 'UNK',
                    print

            if numpy.mod(uidx, validFreq) == 0:
                valid_errs = pred_probs(f_log_probs, prepare_data, model_options, valid)
                valid_err = valid_errs.mean()
                rnnlm.error_history.append(valid_err)

                if uidx == 0 or valid_err <= numpy.array(rnnlm.error_history).min():
                    best_params = rnnlm.get_param_values()
                    bad_counter = 0
                if len(rnnlm.error_history) > patience and valid_err >= numpy.array(rnnlm.error_history)[:-patience].min():
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break

                if numpy.isnan(valid_err):
                    import ipdb; ipdb.set_trace()

                print 'Valid ', valid_err

        print 'Seen %d samples'%n_samples

        if estop:
            break

    if best_params is not None:
        rnnlm.set_param_values(best_params)

    valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()

    print 'Valid ', valid_err

    params = copy.copy(best_params)
    numpy.savez(model_path, zipped_params=best_params, 
                error_history=rnnlm.error_history, 
                **params)

    return valid_err
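
For reference, a hypothetical invocation of the function above; every path and value below is a placeholder, not taken from the original:

if __name__ == '__main__':
    train('data/train.txt',        # placeholder training corpus
          'data/valid.txt',        # placeholder validation corpus
          'data/dictionary.txt',   # placeholder vocabulary file
          'models/rnnlm_model',    # placeholder output prefix
          max_epochs=100,
          batch_size=32)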