Example #1
def train_and_eval(runid, module_prep_model, c, glove, vocab, gr, s0, grt, s0t, do_eval=True):
    print('Model')
    model = build_model(glove, vocab, module_prep_model, c)

    print('Training')
    # Optionally re-balance the loss: the negative class is weighted by the
    # positive-class ratio, so that rare positives are not drowned out.
    if c.get('balance_class', False):
        one_ratio = np.sum(gr['score'] == 1) / len(gr['score'])
        class_weight = {'score': {0: one_ratio, 1: 0.5}}
    else:
        class_weight = {}
    # XXX: samples_per_epoch is in brmson/keras fork, TODO fit_generator()?
    model.fit(gr, validation_data=grt,
              callbacks=[HypEvCB(s0t, grt),
                         ModelCheckpoint('hyp-weights-'+runid+'-bestval.h5', save_best_only=True, monitor='acc', mode='max'),
                         EarlyStopping(monitor='acc', mode='max', patience=4)],
              class_weight=class_weight,
              batch_size=c['batch_size'], nb_epoch=c['nb_epoch'])
    model.save_weights('hyp-weights-'+runid+'-final.h5', overwrite=True)
    if c['ptscorer'] is None:
        model.save_weights('hyp-weights-'+runid+'-bestval.h5', overwrite=True)
    model.load_weights('hyp-weights-'+runid+'-bestval.h5')

    if do_eval:
        print('Predict&Eval (best epoch)')
        prediction = model.predict(gr)['score'][:,0]
        prediction_t = model.predict(grt)['score'][:,0]
        ev.eval_hypev(prediction, s0, gr['score'], 'Train')
        ev.eval_hypev(prediction_t, s0t, grt['score'], 'Val')
        # NB: s1 and s1t are not parameters of this function; they are assumed
        # to be available from the enclosing module scope.
        dump_questions(s0, s1, gr['score'], prediction, 'Train')
        dump_questions(s0t, s1t, grt['score'], prediction_t, 'Val')
    return model
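For reference, here is a minimal self-contained sketch of the 'balance_class' weighting used above, with a toy label array standing in for gr['score'] (the array values are made up for illustration; the dict layout mirrors the snippet):

import numpy as np

# Toy binary labels standing in for gr['score']: mostly negatives, a few positives.
score = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 0])

# Fraction of positive samples; it becomes the weight of the negative class,
# so the rarer positive class (fixed weight 0.5) counts relatively more.
one_ratio = float(np.sum(score == 1)) / len(score)
class_weight = {'score': {0: one_ratio, 1: 0.5}}

print(class_weight)  # {'score': {0: 0.2, 1: 0.5}}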
Example #2
def eval(self, model):
    """Run the model on the train, val and test splits and return a tuple of
    per-split results from ev.eval_hypev (None for splits that are not loaded)."""
    res = []
    for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf), (self.grt, self.testf)]:
        if gr is None:
            res.append(None)
            continue
        ypred = model.predict(gr)['score'][:, 0]
        res.append(ev.eval_hypev(ypred, gr['score'], fname))
    return tuple(res)
Example #3
def eval(self, model):
    """Predict on the train, val and test splits via self.predict() and return
    a tuple of per-split results from ev.eval_hypev (None for missing splits)."""
    res = []
    for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf), (self.grt, self.testf)]:
        if gr is None:
            res.append(None)
            continue
        ypred = self.predict(model, gr)
        res.append(ev.eval_hypev(gr.get('qids', None), ypred, gr['score'], fname))
    return tuple(res)
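Both eval() variants above follow the same pattern: iterate over (split, filename) pairs, return None for splits that were not loaded, and collect one result per split into a tuple. Below is a minimal self-contained sketch of that pattern; the eval_splits helper, the toy accuracy metric and the data are made up for illustration and are not part of the original code:

import numpy as np

def eval_splits(predict_fn, splits):
    """Return a tuple with one result per (split, name) pair, or None for missing splits."""
    res = []
    for gr, fname in splits:
        if gr is None:  # split not loaded, e.g. no test file was given
            res.append(None)
            continue
        ypred = predict_fn(gr)
        acc = float(np.mean((ypred > 0.5) == (gr['score'] == 1)))  # toy accuracy metric
        res.append((fname, acc))
    return tuple(res)

# Toy usage; gr mirrors the dict-of-arrays layout used in the examples above.
train = {'score': np.array([1, 0, 1, 0])}
val = {'score': np.array([1, 1, 0])}
print(eval_splits(lambda gr: gr['score'].astype(float),
                  [(train, 'train.tsv'), (val, 'val.tsv'), (None, None)]))
# (('train.tsv', 1.0), ('val.tsv', 1.0), None)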