def pretrain_decade(decades):
    # Pretrain with the RMSprop-based PRETRAIN optimizer for `decades` blocks
    # of 10 epochs each, sampling from the model and logging after every block.
    model.compile(PRETRAIN(lr=PRETRAIN_LR), "categorical_crossentropy")
    unbroken = True
    for decade in range(1, decades + 1):
        try:
            model.fit(X, y, nb_epoch=10, validation_data=val)
        except KeyboardInterrupt:
            unbroken = False
        spoken.append("RMSprop pretrain epoch {}: {}".format(decade * 10, keras_speak(model, petofi)))
        print(spoken[-1])
        log(spoken[-1])
        if not unbroken:
            log("RMSprop pretrain BROKEN with KEYBOARD_INTERRUPT")
            return
def finetune_decade(decades): model.compile("adam", "categorical_crossentropy") unbroken = True for decade in range(1, decades+1): try: model.fit(X, y, nb_epoch=10, validation_data=val) except KeyboardInterrupt: unbroken = False spoken.append("SGD finetune epoch {}: {}".format(10 * decade, keras_speak(model, petofi))) print(spoken[-1]) log(spoken[-1]) if not unbroken: log("SGD finetune BROKEN with KEYBOARD_INTERRUPT") return
def finetune_century(century): model.compile("adam", "categorical_crossentropy") unbroken = True for decade in range(1, century+1): try: model.fit_generator(the_generator, samples_per_epoch=petofi.N, nb_epoch=1) except KeyboardInterrupt: unbroken = False spoken.append("SGD finetune epoch {}: {}".format(10 * decade, keras_speak(model, petofi))) print(spoken[-1]) log(spoken[-1]) if not unbroken: log("SGD finetune BROKEN with KEYBOARD_INTERRUPT") return
def sample(stochastic=False):
    # Draw a text sample from the current model, log it and print it.
    smpl = keras_speak(model, petofi, stochastic, ngrams=SAMPLE_NO_NGRAMS)
    log(smpl)
    print(smpl)
    return smpl
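
# Minimal usage sketch of the helpers above. The schedule and argument values
# here are illustrative assumptions, not the author's actual run; only names
# defined above (pretrain_decade, finetune_decade, sample) are used.
pretrain_decade(3)        # 3 x 10 epochs with the RMSprop pretraining optimizer
finetune_decade(2)        # 2 x 10 epochs with Adam on the same (X, y) split
sample(stochastic=True)   # sample stochastically from the fine-tuned model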