# Example #1
    def __init__(self, lm, recalculate_props=False, directory='searcher'):
        """Load the generator, prediction and payout models from *directory*.

        Args:
            lm: language model instance shared by all three model configs.
            recalculate_props: when True, rebuild the proposition-vector
                database even if a cached copy exists on disk.
            directory: folder holding the saved ``*.parameters`` and
                ``*.weights`` files and the ``pred_database`` cache.
        """
        self.lm = lm

        # Load all the variables and parameters.  File names within
        # `directory` are fixed by convention (gen/pred/payout prefixes).
        self.gen_config = gen_model_beam_search.Config(lm)
        self.gen_config.load(os.path.join(directory, 'gen.parameters'))
        self.gen_var = gen_model_beam_search.Variables(self.gen_config)
        self.gen_var.load(os.path.join(directory, 'gen.weights'))

        self.pred_config = pred_model_run.Config(lm)
        self.pred_config.load(os.path.join(directory, 'pred.parameters'))
        self.pred_var = pred_model_run.Variables(self.pred_config)
        self.pred_var.load(os.path.join(directory, 'pred.weights'))

        self.payout_config = payout_model_run.Config(lm)
        self.payout_config.load(os.path.join(directory, 'payout.parameters'))
        self.payout_var = payout_model_run.Variables(self.payout_config)
        self.payout_var.load(os.path.join(directory, 'payout.weights'))

        # Beam search interface: the same generator variables are replicated
        # GEN_ENSEMBLE times to form the ensemble.
        self.bsi = gen_model_beam_search.BeamSearchInterface([self.gen_var] *
                                                             GEN_ENSEMBLE)

        # Remember the proposition vectors on disk so that we don't need to
        # constantly recalculate them.
        # NOTE: print statements rewritten as function calls so the code is
        # valid under both Python 2 and Python 3.
        file_path = os.path.join(directory, 'pred_database')
        if os.path.isfile(file_path) and not recalculate_props:
            print('loading proposition vectors')
            with open(file_path, 'rb') as handle:
                # NOTE(review): pickle.load on a local cache file — fine for
                # trusted data, never for untrusted input.
                self.pred_database = pickle.load(handle)
        else:
            print('using proposition vectors at ' + file_path)
            self.initialize_pred(file_path)
# Example #2
# text = file_contents()
# database = meta_math_database(text,n=2000)
# Load the pre-built proof database from the pickled 'lm' file instead of
# re-parsing the raw text (the commented-out lines above show the slow path).
with open('lm', 'rb') as handle:
    database = pickle.load(handle)
# Drop per-step proof data before building the language model to save memory.
database.remember_proof_steps = False
language_model = LanguageModel(database)

print()
print()

# this is the main routine for actual training
# Exactly one of the three model modules is imported as `model`; swap the
# comment markers to train a different head.
#import pred_model as model
#import gen_model_train as model
import payout_model_5_train as model

# Hyperparameters for this training run.  `config.p.*` are model parameters;
# `config.regularization` is set directly on the config object.
config = model.Config(language_model)
config.p.lr = 1.0e-5
config.p.r = 128
config.regularization = 1.0e-4
config.p.lr_reduction = 10
config.p.gru_depth = 2
#config.p.dropout = None
#config.p.augmentation= False
config.p.structure_data = True
#config.p.attention=True
#config.p.full_state_attention=False
config.p.bidirectional = True
config.p.out_layers = 1

# None means no epoch cap — presumably training runs until stopped manually;
# TODO confirm against trainer.Trainer's handling of max_epochs.
config.p.max_epochs = None
train_object = trainer.Trainer(config,