Code example #1
0
File: dan.py  Project: jankim/qb
        # End-of-epoch reporting: wall-clock time for this epoch
        # (ep_t presumably set at the top of the epoch loop, outside
        # this fragment -- TODO confirm).
        print time.time() - ep_t
        print 'done with epoch ', epoch, ' epoch error = ', epoch_error, ' min error = ', min_error
        # Mirror the progress line into the log file and flush immediately
        # so progress survives an interrupted run.
        lstring = 'done with epoch ' + str(epoch) + ' epoch error = ' + str(epoch_error) \
                 + ' min error = ' + str(min_error) + '\n\n'
        log.write(lstring)
        log.flush()

        # save parameters if the current model is better than previous best model
        if epoch_error < min_error:
            min_error = epoch_error
            print 'saving model...'
            # NOTE(review): unroll_params appears to rebuild the parameter
            # structure from the flat vector r -- confirm against its definition.
            params = unroll_params(r, d, len_voc, deep = 3)
            cPickle.dump( params, open(param_file, 'wb'))

        # reset adagrad weights
        # Periodically clear the adagrad accumulator every
        # args['adagrad_reset'] epochs (skipping epoch 0).
        if epoch % args['adagrad_reset'] == 0 and epoch != 0:
            ag.reset_weights()

        # check accuracy on validation set
        # Run validation every args['do_val'] epochs (skipping epoch 0).
        if epoch % args['do_val'] == 0 and epoch != 0:
            print 'validating...'
            params = unroll_params(r, d, len_voc, deep = 3)
            evaluate(train_qs, val_qs, params, d)
            print '\n\n'

    log.close()

    print 'step 2 of 2: training classifier over all answers (takes 10-15 hours depending on number of answers)'
    # Reload the best parameters saved during training and run the
    # final evaluation over all answers.
    params = cPickle.load(open(param_file, 'rb'))
    evaluate(train_qs, val_qs, params, d)
Code example #2
0
##   and compare performance to bag of words and dependency relation baselines
## - be sure to train a model first by running qanta.py

if __name__ == '__main__':

    # command line arguments
    parser = argparse.ArgumentParser(description='QANTA evaluation')
    parser.add_argument('-data',
                        help='location of dataset',
                        default='data/hist_split')
    parser.add_argument('-model',
                        help='location of trained model',
                        default='models/hist_params')
    parser.add_argument('-d',
                        help='word embedding dimension',
                        type=int,
                        default=100)

    args = vars(parser.parse_args())

    print 'qanta performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=True, \
              bow_feats=False, rel_feats=False)

    print '\n\n\n bow performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=False, \
              bow_feats=True, rel_feats=False)

    print '\n\n\n bow-dt performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=False, \
              bow_feats=True, rel_feats=True)
Code example #3
0
File: evaluate_qanta.py  Project: arcecio/qanta
from classify.learn_classifiers import evaluate
import argparse

## - evaluate QANTA's learned representations on history questions
##   and compare performance to bag of words and dependency relation baselines
## - be sure to train a model first by running qanta.py

if __name__ == '__main__':
    
    # command line arguments
    parser = argparse.ArgumentParser(description='QANTA evaluation')
    parser.add_argument('-data', help='location of dataset', default='data/hist_split')
    parser.add_argument('-model', help='location of trained model', default='models/hist_params')
    parser.add_argument('-d', help='word embedding dimension', type=int, default=100)

    args = vars(parser.parse_args())

    print 'qanta performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=True, \
              bow_feats=False, rel_feats=False)

    print '\n\n\n bow performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=False, \
              bow_feats=True, rel_feats=False)

    print '\n\n\n bow-dt performance: '
    evaluate(args['data'], args['model'], args['d'], rnn_feats=False, \
              bow_feats=True, rel_feats=True)
Code example #4
0
        # End-of-epoch reporting: wall-clock time for this epoch
        # (ep_t presumably set at the top of the epoch loop, outside
        # this fragment -- TODO confirm).
        print time.time() - ep_t
        print 'done with epoch ', epoch, ' epoch error = ', epoch_error, ' min error = ', min_error
        # Mirror the progress line into the log file and flush immediately
        # so progress survives an interrupted run.
        lstring = 'done with epoch ' + str(epoch) + ' epoch error = ' + str(epoch_error) \
                 + ' min error = ' + str(min_error) + '\n\n'
        log.write(lstring)
        log.flush()

        # save parameters if the current model is better than previous best model
        if epoch_error < min_error:
            min_error = epoch_error
            print 'saving model...'
            # NOTE(review): unroll_params appears to rebuild the parameter
            # structure from the flat vector r -- confirm against its definition.
            params = unroll_params(r, d, len_voc, deep = 3)
            cPickle.dump( params, open(param_file, 'wb'))

        # reset adagrad weights
        # Periodically clear the adagrad accumulator every
        # args['adagrad_reset'] epochs (skipping epoch 0).
        if epoch % args['adagrad_reset'] == 0 and epoch != 0:
            ag.reset_weights()

        # check accuracy on validation set
        # Run validation every args['do_val'] epochs (skipping epoch 0).
        if epoch % args['do_val'] == 0 and epoch != 0:
            print 'validating...'
            params = unroll_params(r, d, len_voc, deep = 3)
            evaluate(train_qs, val_qs, params, d)
            print '\n\n'

    log.close()

    print 'step 2 of 2: training classifier over all answers (takes 10-15 hours depending on number of answers)'
    # Reload the best parameters saved during training and run the
    # final evaluation over all answers.
    params = cPickle.load(open(param_file, 'rb'))
    evaluate(train_qs, val_qs, params, d)