def load_model(trainf, model_name, weights_file):
    """Rebuild an answer-selection model and load its trained weights.

    Reconstructs the model configuration and vocabulary from the training
    set, builds the network, then restores the saved weights.

    Args:
        trainf: path to the training set file (used only to rebuild vocab).
        model_name: name of the model module under the ``models`` package.
        weights_file: path to the saved Keras weights (HDF5).

    Returns:
        (model, vocab) tuple.

    NOTE(review): relies on a module-level ``glove`` embedding object being
    initialized before this is called — confirm at the call site.
    """
    params = []
    # BUG FIX: the body previously referenced the global `modelname` instead
    # of the `model_name` parameter, silently ignoring the argument.
    module = importlib.import_module('.' + model_name, 'models')
    conf, ps, h = anssel_train.config(module.config, params)
    s0, s1, y, vocab, gr = anssel_train.load_set(trainf)
    model = anssel_train.build_model(glove, vocab, module.prep_model, conf)
    model.load_weights(weights_file)
    return model, vocab
def load_model(trainf, model_name, weights_file):
    """Rebuild an answer-selection model and load its trained weights.

    Args:
        trainf: path to the training set file (used only to rebuild vocab).
        model_name: name of the model module under the ``models`` package.
        weights_file: path to the saved Keras weights (HDF5).

    Returns:
        (model, vocab) tuple.

    NOTE(review): depends on a module-level ``glove`` embedding object —
    confirm it is initialized before calling.
    """
    params = []
    # BUG FIX: previously used the global `modelname` instead of the
    # `model_name` parameter, so the passed argument was ignored.
    module = importlib.import_module('.' + model_name, 'models')
    conf, ps, h = anssel_train.config(module.config, params)
    s0, s1, y, vocab, gr = anssel_train.load_set(trainf)
    model = anssel_train.build_model(glove, vocab, module.prep_model, conf)
    model.load_weights(weights_file)
    return model, vocab
# NOTE(review): the loop below appears to be the tail of a TREC-format output
# writer whose `def` line is outside this chunk — `n`, `m`, `f` and `code`
# come from that enclosing scope; confirm against the full file.
last_is0 = ''
for is0, is1, iy in zip(s0, s1, y):
    # A new question starts whenever the s0 token sequence changes: reset the
    # per-question answer counter `m` and advance the question id `n`.
    # (Hashing the tuple gives a cheap identity for the token sequence.)
    if hash(tuple(is0)) != last_is0:
        last_is0 = hash(tuple(is0))
        m = 0
        n += 1
    # One line per (question, answer) pair: qid, answer index, label/score.
    print('%d 0 %d 1 %f %s' % (n, m, iy, code), file=f)
    m += 1


if __name__ == "__main__":
    # CLI: modelname weightsfile trainf valf trec_qrels_file trec_top_file [params...]
    modelname, weightsfile, trainf, valf, trec_qrels_file, trec_top_file = sys.argv[1:7]
    params = sys.argv[7:]

    module = importlib.import_module('.'+modelname, 'models')
    conf, ps, h = anssel_train.config(module.config, params)

    print('GloVe')
    # Embedding dimensionality is taken from the model configuration.
    glove = emb.GloVe(N=conf['embdim'])

    print('Dataset')
    s0, s1, y, vocab, gr = anssel_train.load_set(trainf)
    # Validation set is loaded with the training vocabulary for consistency.
    s0t, s1t, yt, _, grt = anssel_train.load_set(valf, vocab)

    print('Model')
    model = anssel_train.build_model(glove, vocab, module.prep_model, conf)

    print('Weights')
    model.load_weights(weightsfile)

    print('Prediction')
# NOTE(review): continuation fragment — this chunk opens mid-way through a
# RandomSearch(...) keyword-argument list whose call head is outside this
# view, and it is cut off inside the trailing model.fit(...) call.
cnninit=['glorot_uniform', 'glorot_uniform', 'normal'],
# Candidate CNN channel-multiplier lists, keyed by convolution kernel width;
# repeated values weight the sampling toward those choices.
cdim={
    1: [0, 0, 1 / 2, 1, 2],
    2: [0, 0, 1 / 2, 1, 2, 0],
    3: [0, 0, 1 / 2, 1, 2, 0],
    4: [0, 0, 1 / 2, 1, 2, 0],
    5: [0, 0, 1 / 2, 1, 2]
},
project=[True, True, False],
pdim=[1, 2, 2.5, 3],
ptscorer=[B.mlp_ptscorer],
Ddim=[1, 2, 2.5, 3])

# Sample hyper-parameter sets and train one model per sample.
for ps, h, pardict in rs():
    print(' ...... %s .................... %s' % (h, ps))
    conf, ps, h = anssel_train.config(module.config, ps)
    model = anssel_train.build_model(glove, vocab, module.prep_model, conf)
    runid = '%s_%x' % (modelname, h)
    # Track MRR on the validation set: checkpoint the best-scoring weights
    # and stop early once MRR stops improving.
    model.fit(gr, validation_data=grt,
              callbacks=[
                  AnsSelCB(s0t, grt),
                  ModelCheckpoint('weights-' + runid + '.h5',
                                  save_best_only=True,
                                  monitor='mrr', mode='max'),
                  EarlyStopping(monitor='mrr', mode='max', patience=1)
              ],
              batch_size=160, nb_epoch=16,
# NOTE(review): hyper-parameter tuning script fragment — it is cut off inside
# the `try:` block below (the matching `except` is outside this chunk).
module = importlib.import_module('.'+modelname, 'models')
s0, s1, y, vocab, gr = anssel_train.load_set(trainf)
# Validation set reuses the training vocabulary.
s0t, s1t, yt, _, grt = anssel_train.load_set(valf, vocab)
glove = emb.GloVe(300)  # XXX hardcoded N

# Hyper-parameter search space: each keyword lists candidate values to sample
# (repeated entries bias sampling toward them); cdim is keyed by kernel width.
rs = RandomSearch(modelname+'_rlog.txt',
                  dropout=[1/2, 2/3, 3/4],
                  inp_e_dropout=[1/2, 3/4, 4/5],
                  l2reg=[1e-4, 1e-3, 1e-2],
                  cnnact=['tanh', 'tanh', 'relu'],
                  cnninit=['glorot_uniform', 'glorot_uniform', 'normal'],
                  cdim={1: [0, 0, 1/2, 1, 2],
                        2: [0, 0, 1/2, 1, 2, 0],
                        3: [0, 0, 1/2, 1, 2, 0],
                        4: [0, 0, 1/2, 1, 2, 0],
                        5: [0, 0, 1/2, 1, 2]},
                  project=[True, True, False],
                  pdim=[1, 2, 2.5, 3],
                  ptscorer=[B.mlp_ptscorer],
                  Ddim=[1, 2, 2.5, 3])

for ps, h, pardict in rs():
    # Start from the default configuration, then overlay the sampled params.
    conf, ps, h = anssel_train.config(module.config, [])
    for k, v in pardict.items():
        conf[k] = v
    # Re-hash so the run id reflects the sampled configuration.
    ps, h = hash_params(conf)
    runid = '%s_%x' % (modelname, h)
    print(' ...... %x .................... %s' % (h, ps))

    try:
        model = anssel_train.build_model(glove, vocab, module.prep_model, conf)
        # Monitor validation MRR; checkpoint the best weights and stop after
        # 4 epochs without improvement.
        model.fit(gr, validation_data=grt,
                  callbacks=[AnsSelCB(s0t, grt),
                             ModelCheckpoint('weights-'+runid+'.h5',
                                             save_best_only=True,
                                             monitor='mrr', mode='max'),
                             EarlyStopping(monitor='mrr', mode='max',
                                           patience=4)],
                  batch_size=160, nb_epoch=16,
                  samples_per_epoch=int(len(s0)/4))
        # mrr = max(hist.history['mrr'])
        # Reload the best checkpointed weights for evaluation.
        model.load_weights('weights-'+runid+'.h5')