Code Example #1
def train_and_eval(runid, module_prep_model, c, glove, vocab, gr, grt, do_eval=True):
    print('Model')
    model = build_model(glove, vocab, module_prep_model, c)

    print('Training')
    if c.get('balance_class', False):
        one_ratio = np.sum(gr['score'] == 1) / len(gr['score'])
        class_weight = {'score': {0: one_ratio, 1: 0.5}}
    else:
        class_weight = {}
    # XXX: samples_per_epoch is in brmson/keras fork, TODO fit_generator()?
    model.fit(gr, validation_data=grt,  # show_accuracy=True,
              callbacks=[ModelCheckpoint('para-weights-'+runid+'-bestval.h5', save_best_only=True),
                         EarlyStopping(patience=3)],
              class_weight=class_weight,
              batch_size=c['batch_size'], nb_epoch=c['nb_epoch'])
    model.save_weights('para-weights-'+runid+'-final.h5', overwrite=True)
    if c['ptscorer'] is None:
        model.save_weights('para-weights-'+runid+'-bestval.h5', overwrite=True)
    model.load_weights('para-weights-'+runid+'-bestval.h5')

    if do_eval:
        print('Predict&Eval (best val epoch)')
        ev.eval_para(model.predict(gr)['score'][:,0], gr['score'], 'Train')
        ev.eval_para(model.predict(grt)['score'][:,0], grt['score'], 'Val')
    return model
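
To make the balance_class branch above concrete, here is a minimal numeric sketch (under Python 3) of the class weight it computes; the toy score array is made up for illustration and is not part of the listing:

import numpy as np

score = np.array([1, 1, 1, 0])                    # toy labels: 3 positives, 1 negative
one_ratio = np.sum(score == 1) / len(score)       # 0.75, the ratio of positive labels
class_weight = {'score': {0: one_ratio, 1: 0.5}}  # the rarer class 0 gets the larger weight here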
Code Example #2
File: para.py Project: brmson/dataset-sts
def eval(self, model):
    # Evaluate on the train/val/test graphs; skip any split that was not
    # loaded and return a tuple of per-split eval_para results.
    res = []
    for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf), (self.grt, self.testf)]:
        if gr is None:
            res.append(None)
            continue
        ypred = self.predict(model, gr)
        res.append(ev.eval_para(ypred, gr['score'], fname))
    return tuple(res)
Code Example #3
File: para.py Project: protonish/dataset-sts
def eval(self, model):
    res = []
    for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf),
                      (self.grt, self.testf)]:
        if gr is None:
            res.append(None)
            continue
        ypred = model.predict(gr)['score'][:, 0]
        res.append(ev.eval_para(ypred, gr['score'], fname))
    return tuple(res)
Code Example #4
def train_and_eval(runid,
                   module_prep_model,
                   c,
                   glove,
                   vocab,
                   gr,
                   grt,
                   do_eval=True):
    print('Model')
    model = build_model(glove, vocab, module_prep_model, c)

    print('Training')
    if c.get('balance_class', False):
        one_ratio = np.sum(gr['score'] == 1) / len(gr['score'])
        class_weight = {'score': {0: one_ratio, 1: 0.5}}
    else:
        class_weight = {}
    # XXX: samples_per_epoch is in brmson/keras fork, TODO fit_generator()?
    model.fit(
        gr,
        validation_data=grt,  # show_accuracy=True,
        callbacks=[
            ModelCheckpoint('para-weights-' + runid + '-bestval.h5',
                            save_best_only=True),
            EarlyStopping(patience=3)
        ],
        class_weight=class_weight,
        batch_size=c['batch_size'],
        nb_epoch=c['nb_epoch'])
    model.save_weights('para-weights-' + runid + '-final.h5', overwrite=True)
    if c['ptscorer'] is None:
        model.save_weights('para-weights-' + runid + '-bestval.h5',
                           overwrite=True)
    model.load_weights('para-weights-' + runid + '-bestval.h5')

    if do_eval:
        print('Predict&Eval (best val epoch)')
        ev.eval_para(model.predict(gr)['score'][:, 0], gr['score'], 'Train')
        ev.eval_para(model.predict(grt)['score'][:, 0], grt['score'], 'Val')
    return model
Code Example #5
File: para.py Project: lizihan021/Hotpot
def eval(self, model):
    res = []
    for gr, fname in [(self.gr, self.trainf), (self.grv, self.valf),
                      (self.grt, self.testf)]:
        if gr is None:
            res.append(None)
            continue
        ypred = self.predict(model, gr)
        # collect the first score of each row into a flat 1-D array
        tmp = []
        for yp in ypred:
            tmp.append(yp[0])
        ypred = np.array(tmp)
        res.append(ev.eval_para(ypred, gr['score'], fname))
    return tuple(res)
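
For comparison, when ypred is a 2-D NumPy array of shape (n, 1), the element-by-element loop above yields the same flat array as the column slice used in Code Example #3; a small sketch with made-up values:

import numpy as np

ypred = np.array([[0.9], [0.1], [0.7]])   # hypothetical per-pair scores, shape (3, 1)
flat = np.array([yp[0] for yp in ypred])  # per-row extraction, as in the loop above
assert np.array_equal(flat, ypred[:, 0])  # identical to slicing the first column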
Code Example #6
def transfer_eval(runid, weightsf, module_prep_model, c, glove, vocab, gr, grv):
    print('Model')
    model = para_train.build_model(glove, vocab, module_prep_model, c, spad=spad, optimizer=c['opt'], fix_layers=c['fix_layers'])
    print('Model (weights)')
    model.load_weights(weightsf)
    ev.eval_para(model.predict(grv)['score'][:,0], grv['score'], 'para Val (bef. train)')

    print('Training')
    if c.get('balance_class', False):
        one_ratio = np.sum(gr['score'] == 1) / len(gr['score'])
        class_weight = {'score': {0: one_ratio, 1: 0.5}}
    else:
        class_weight = {}
    model.fit(gr, validation_data=grv,
              callbacks=[ModelCheckpoint('para-weights-'+runid+'-bestval.h5', save_best_only=True),
                         EarlyStopping(patience=4)],
              class_weight=class_weight,
              batch_size=conf['batch_size'], nb_epoch=conf['nb_epoch'], samples_per_epoch=int(len(gr['score'])*conf['epoch_fract']))
    model.save_weights('para-weights-'+runid+'-final.h5', overwrite=True)

    print('Predict&Eval (best epoch)')
    model.load_weights('para-weights-'+runid+'-bestval.h5')
    ev.eval_para(model.predict(grv)['score'][:,0], grv['score'], 'para Val')
Code Example #7
    acc, f1 = [], []
    accv, f1v = [], []
    acct, f1t = [], []
    for i in range(niter):
        runid = '%s-%x-%02d' % (modelname, h, i)
        print('RunID: %s  (%s)' % (runid, ps))

        model = para_train.train_and_eval(runid, module.prep_model, conf, glove, vocab, gr, grt, do_eval=False)

        print('Predict&Eval (best val epoch)')
        ypred = model.predict(gr)['score'][:,0]
        ypredv = model.predict(grv)['score'][:,0]
        ypredt = model.predict(grt)['score'][:,0]

        acc_, f1_ = ev.eval_para(ypred, y, trainf)
        acc.append(acc_)
        f1.append(f1_)
        acc_, f1_ = ev.eval_para(ypredv, yv, valf)
        accv.append(acc_)
        f1v.append(f1_)
        acc_, f1_ = ev.eval_para(ypredt, yt, testf)
        acct.append(acc_)
        f1t.append(f1_)

        rdata = {'ps': ps, 'ypred': (ypred, ypredv, ypredt), 'acc': (acc, accv, acct), 'f1': (f1, f1v, f1t)}
        pickle.dump(rdata, open('%s-res.pickle' % (runid,), 'wb'), protocol=2)

    bacc = stat(niter, trainf, 'Accuracy', acc)
    bf1 = stat(niter, trainf, 'F-score', f1)
    baccv = stat(niter, valf, 'Accuracy', accv)
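
The per-run results pickled in the loop above can be read back with the standard pickle module; the runid value below is a made-up placeholder, not taken from the listing:

import pickle

runid = 'avg-1234abcd-00'                 # placeholder; use the RunID printed during training
with open('%s-res.pickle' % runid, 'rb') as f:
    rdata = pickle.load(f)
print(rdata['acc'])                       # the (acc, accv, acct) lists accumulated so far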
Code Example #8
        model = para_train.train_and_eval(runid,
                                          module.prep_model,
                                          conf,
                                          glove,
                                          vocab,
                                          gr,
                                          grt,
                                          do_eval=False)

        print('Predict&Eval (best val epoch)')
        ypred = model.predict(gr)['score'][:, 0]
        ypredv = model.predict(grv)['score'][:, 0]
        ypredt = model.predict(grt)['score'][:, 0]

        acc_, f1_ = ev.eval_para(ypred, y, trainf)
        acc.append(acc_)
        f1.append(f1_)
        acc_, f1_ = ev.eval_para(ypredv, yv, valf)
        accv.append(acc_)
        f1v.append(f1_)
        acc_, f1_ = ev.eval_para(ypredt, yt, testf)
        acct.append(acc_)
        f1t.append(f1_)

        rdata = {
            'ps': ps,
            'ypred': (ypred, ypredv, ypredt),
            'acc': (acc, accv, acct),
            'f1': (f1, f1v, f1t)
        }