Code example #1
        'lr': 0.0627142536696559,
        'verbose': 1,
        'decay': False,  # decay on the learning rate if improvement stops
        'win': 7,  # number of words in the context window
        'bs': 9,  # number of backprop through time steps
        'nhidden': 100,  # number of hidden units
        'seed': 345,
        'emb_dimension': 100,  # dimension of word embedding
        'nepochs': 50
    }

    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    vocsize = len(set(reduce( \
        lambda x, y: list(x) + list(y), \
        train_lex + valid_lex + test_lex)))

    nclasses = len(set(reduce( \
        lambda x, y: list(x) + list(y), \
        train_y + test_y + valid_y)))
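
Both counts above rebuild the vocabulary and label inventories by flattening every split with reduce. A minimal Python 3 equivalent (reduce moved to functools in Python 3; a set comprehension avoids it entirely):

vocsize = len({w for sent in train_lex + valid_lex + test_lex for w in sent})
nclasses = len({t for labels in train_y + test_y + valid_y for t in labels})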
Code example #2
File: elman-forward.py  Project: hadyelsahar/OpenIE
        'lr': 0.0627142536696559,
        'verbose': 1,
        'decay': False,  # decay on the learning rate if improvement stops
        'win': 7,  # number of words in the context window
        'bs': 9,  # number of backprop through time steps
        'nhidden': 100,  # number of hidden units
        'seed': 345,
        'emb_dimension': 100,  # dimension of word embedding
        'nepochs': 50
    }

    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(args.input)
    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    vocsize = len(dic['words2idx'])
    nclasses = len(dic['labels2idx'])
    nsentences = len(train_lex)

    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])
    rnn = model(nh=s['nhidden'],
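
The model(...) call is truncated at this point in the snippet; a sketch of the full constructor, with the remaining argument names assumed from the complete call in example #9:

rnn = model(nh=s['nhidden'],        # number of hidden units
            nc=nclasses,            # number of output classes
            ne=vocsize,             # vocabulary size
            de=s['emb_dimension'],  # word embedding dimension
            cs=s['win'])            # context window size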
Code example #3
if __name__ == '__main__':
    folder = os.path.join(
        'out/',
        os.path.basename(__file__).split('.')[0])  # folder = 'out/bilstm-lm'
    os.makedirs(folder, exist_ok=True)

    print('ATIS fold:', FLAGS.fold)
    print('with language model:', FLAGS.with_lm)
    print('with GloVe:', FLAGS.with_glove)
    print('with bi-LSTM:', FLAGS.bi_lstm)
    print('Training epochs:', FLAGS.nepochs)
    print('Training data size:', FLAGS.nsentences)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(
        FLAGS.fold)  # size: 3983, 893, 893
    idx2word = dict((k, v) for v, k in dic['words2idx'].items())
    idx2label = dict((k, v) for v, k in dic['labels2idx'].items())

    # id, named entity, label
    train_x, train_ne, train_y = train_set
    valid_x, valid_ne, valid_y = valid_set
    test_x, test_ne, test_y = test_set

    vocsize = len(dic['words2idx'])
    nclasses = len(dic['labels2idx'])
    nsentences = len(train_x)
    assert FLAGS.nsentences <= nsentences, 'Training data size must be at most 3983.'

    sentences_train = [
        ' '.join(list(map(lambda x: idx2word[x], s))) for s in train_x
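
The list comprehension is cut off at this point; under the obvious reading it joins each index sequence back into a space-separated sentence, roughly:

sentences_train = [
    ' '.join(idx2word[idx] for idx in sent) for sent in train_x
]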
Code example #4
File: elman-forward.py  Project: aadamson/is13
    s = {'fold':3, # 5 folds 0,1,2,3,4
         'lr':0.0627142536696559,
         'verbose':1,
         'decay':False, # decay on the learning rate if improvement stops
         'win':7, # number of words in the context window
         'bs':9, # number of backprop through time steps
         'nhidden':100, # number of hidden units
         'seed':345,
         'emb_dimension':100, # dimension of word embedding
         'nepochs':50}

    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
    idx2word  = dict((k,v) for v,k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex,  test_ne,  test_y  = test_set

    vocsize = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_lex+valid_lex+test_lex)))

    nclasses = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_y+test_y+valid_y)))
    
Code example #5
File: WVLinear.py  Project: libofang/ARNN-SF
def test(s):

    # `folder` is used below for the conlleval output paths but was never
    # defined in this snippet; mirror the other examples so the code runs
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)
    # load word vector
    wv = np.load("./../WV/" + s['WVFolderName'] + "/" + s['model']+".words" + str(s['emb_dimension']) + ".npy")
    # load vocab
    with open("./../WV/" + s['WVFolderName'] + "/" + s['model']+".words" + str(s['emb_dimension']) + ".vocab") as f:
        vocab = [line.strip() for line in f if len(line) > 0]
    wi = dict([(a, i) for i, a in enumerate(vocab)])
    iw = vocab

    # load the dataset
    if s['dataset'] == 'atis':
        train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    if s['dataset'] == 'ner':
        train_set, valid_set, test_set, dic = load.ner()
    if s['dataset'] == 'chunk':
        train_set, valid_set, test_set, dic = load.chunk()
    if s['dataset'] == 'pos':
        train_set, valid_set, test_set, dic = load.pos()

    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    # train_lex.extend(valid_lex)
    # train_ne.extend(valid_ne)
    # train_y.extend(valid_y)

    vocsize = len(dic['words2idx'])
    nclasses = len(dic['labels2idx'])

    my_train_input, my_train_y = getInputOutput(train_lex, train_y, s['win'], idx2word)
    my_train_x = getX(my_train_input, wv, iw, wi)

    my_test_input, my_test_y = getInputOutput(test_lex, test_y, s['win'], idx2word)
    my_test_x = getX(my_test_input, wv, iw, wi)

    clf = MLPClassifier(hidden_layer_sizes=(), verbose=False, activation='tanh')
    clf.fit(my_train_x, my_train_y)



    # eval
    eval_options = []
    if s['dataset'] == 'pos':
        eval_options = ['-r']
    my_train_yp = clf.predict(my_train_x)
    my_test_yp = clf.predict(my_test_x)
    # print my_train_y
    # print my_train_yp
    predictions_train = getFormatedPY(train_y, my_train_yp, idx2label)
    groundtruth_train = [map(lambda x: idx2label[x], y) for y in train_y]
    words_train = [map(lambda x: idx2word[x], w) for w in train_lex]

    predictions_test = getFormatedPY(test_y, my_test_yp, idx2label)
    groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]
    words_test = [map(lambda x: idx2word[x], w) for w in test_lex]

    res_train = conlleval(predictions_train, groundtruth_train, words_train, folder + '/linear.train.' + s['dataset'] + '.txt', eval_options)
    res_test = conlleval(predictions_test, groundtruth_test, words_test, folder + '/linear.test.' + s['dataset'] + '.txt', eval_options)

    # print '                        train', res_train['p'], res_train['r'], res_train['f1'] , ' ' * 20
    # print '                         test', res_test['p'], res_test['r'], res_test['f1'] , ' ' * 20
    print res_test['f1'],
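
getInputOutput and getX are project helpers not shown in this snippet; a hypothetical sketch of the feature construction their usage suggests (embed each word of a context window and concatenate the vectors into one flat row for the MLPClassifier). window_features and its signature are illustrative, not the project's API:

import numpy as np

def window_features(window_words, wv, wi, dim):
    # Hypothetical helper: concatenate the embedding of each word in the
    # context window; out-of-vocabulary words fall back to a zero vector.
    rows = [wv[wi[w]] if w in wi else np.zeros(dim) for w in window_words]
    return np.concatenate(rows)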
Code example #6
File: elman-forward.py  Project: npow/is13
    parser.add_argument('--seed', type=int, default=345, help='Seed')
    parser.add_argument('--bs', type=int, default=9, help='Number of backprop through time steps')
    parser.add_argument('--win', type=int, default=7, help='Number of words in context window')
    parser.add_argument('--fold', type=int, default=4, help='Fold number, 0-4')
    parser.add_argument('--lr', type=float, default=0.0627142536696559, help='Learning rate')
    parser.add_argument('--verbose', type=int, default=1, help='Verbose or not')
    parser.add_argument('--decay', type=int, default=0, help='Decay lr or not')
    s = parser.parse_args()

    print '*' * 80
    print s
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s.fold)
    idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
    idx2word  = dict((k,v) for v,k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex,  test_ne,  test_y  = test_set

    vocsize = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_lex+valid_lex+test_lex)))

    nclasses = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_y+test_y+valid_y)))
    
Code example #7
File: jordan-forward.py  Project: wqren/is13
        "verbose": 1,
        "decay": False,  # decay on the learning rate if improvement stops
        "win": 7,  # number of words in the context window
        "bs": 9,  # number of backprop through time steps
        "nhidden": 100,  # number of hidden units
        "seed": 345,
        "emb_dimension": 100,  # dimension of word embedding
        "nepochs": 50,
    }

    folder = os.path.basename(__file__).split(".")[0]
    if not os.path.exists(folder):
        os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s["fold"])
    idx2label = dict((k, v) for v, k in dic["labels2idx"].iteritems())
    idx2word = dict((k, v) for v, k in dic["words2idx"].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    vocsize = len(set(reduce(lambda x, y: list(x) + list(y), train_lex + valid_lex + test_lex)))

    nclasses = len(set(reduce(lambda x, y: list(x) + list(y), train_y + test_y + valid_y)))

    nsentences = len(train_lex)

    # instantiate the model
    numpy.random.seed(s["seed"])
Code example #8
File: main.py  Project: wavelets/RNN-EM
                        default=0.0627142536696559,
                        help='Learning rate')
    parser.add_argument('--verbose',
                        type=int,
                        default=1,
                        help='Verbose or not')
    parser.add_argument('--decay', type=int, default=0, help='Decay lr or not')
    s = parser.parse_args()

    print '*' * 80
    print s
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s.fold)
    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    vocsize = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_lex+valid_lex+test_lex)))

    nclasses = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_y+test_y+valid_y)))
Code example #9
File: deep-example.py  Project: aadamson/is13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        help='Adjust level of verbosity.')
    parser.add_argument('-nh', '--num-hidden', dest='num_hidden', type=int, default=100,
                        help='Set dimension of hidden units.')
    parser.add_argument('-w', '--window', type=int, default=5,
                        help='Set size of context window (in words).')
    parser.add_argument('-d', '--depth', type=int, default=3,
                        help='Set number of stacked layers')
    parser.add_argument('--seed', type=int, default=345,
                        help='Set PRNG seed')
    parser.add_argument('--emb-dim', dest='emb_dimension', type=int, default=100,
                        help='Set size of word embeddings')
    parser.add_argument('-e', '--num-epochs', dest='num_epochs', type=int, default=50,
                        help='Set number of epochs to train')

    args = parser.parse_args()

    s = {'fold':3, # 5 folds 0,1,2,3,4
         'lr':0.0627142536696559,
         'verbose': args.verbose,
         'decay': False, # decay on the learning rate if improvement stops
         'win': args.window, # number of words in the context window
         'bs':9, # number of backprop through time steps
         'nhidden': args.num_hidden, # number of hidden units
         'depth': args.depth, # number of layers in space
         'seed': args.seed,
         'emb_dimension': args.emb_dimension, # dimension of word embedding
         'nepochs': args.num_epochs}

    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
    idx2word  = dict((k,v) for v,k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex,  test_ne,  test_y  = test_set

    vocsize = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_lex+valid_lex+test_lex)))

    nclasses = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_y+test_y+valid_y)))
    
    nsentences = len(train_lex)

    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])
    rnn = model(nh=s['nhidden'],
                nc=nclasses,
                ne=vocsize,
                de=s['emb_dimension'],
                cs=s['win'],
                depth=s['depth'])

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            labels = train_y[i]
            for word_batch, label_last_word in zip(words, labels):
                print word_batch
                #print label_last_word
                #pdb.set_trace()
                rnn.train(word_batch, label_last_word, s['clr'])
                rnn.normalize()
            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
                sys.stdout.flush()
            
        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
        words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]

        predictions_valid = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in valid_lex ]
        groundtruth_valid = [ map(lambda x: idx2label[x], y) for y in valid_y ]
        words_valid = [ map(lambda x: idx2word[x], w) for w in valid_lex]

        # evaluation // compute the accuracy using conlleval.pl
        res_test  = conlleval(predictions_test, groundtruth_test, words_test, folder + '/current.test.txt')
        res_valid = conlleval(predictions_valid, groundtruth_valid, words_valid, folder + '/current.valid.txt')

        if res_valid['f1'] > best_f1:
            rnn.save(folder)
            best_f1 = res_valid['f1']
            if s['verbose']:
                print 'NEW BEST: epoch', e, 'valid F1', res_valid['f1'], 'best test F1', res_test['f1'], ' '*20
            s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid['p'], res_valid['r']
            s['tf1'], s['tp'], s['tr'] = res_test['f1'],  res_test['p'],  res_test['r']
            s['be'] = e
            subprocess.call(['mv', folder + '/current.test.txt', folder + '/best.test.txt'])
            subprocess.call(['mv', folder + '/current.valid.txt', folder + '/best.valid.txt'])
        else:
            print ''
        
        # learning rate decay if no improvement in 10 epochs
        if s['decay'] and abs(s['be']-s['ce']) >= 10: s['clr'] *= 0.5 
        if s['clr'] < 1e-5: break

    print 'BEST RESULT: epoch', e, 'valid F1', s['vf1'], 'best test F1', s['tf1'], 'with the model', folder
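
contextwin and minibatch are utilities from the is13 repository; a minimal Python 3 sketch consistent with their use above (the -1 padding index and the exact minibatching scheme are assumptions based on the public tutorial code):

def contextwin(l, win):
    # One window of `win` word indexes centred on each position,
    # padded with the index -1 at both ends.
    assert win % 2 == 1 and win >= 1
    l = list(l)
    padded = (win // 2) * [-1] + l + (win // 2) * [-1]
    return [padded[i:i + win] for i in range(len(l))]

def minibatch(l, bs):
    # Growing prefixes up to length `bs`, then a sliding window over the
    # last `bs` positions, so each step sees at most bs context windows.
    out = [l[:i] for i in range(1, min(bs, len(l)) + 1)]
    out += [l[i - bs:i] for i in range(bs + 1, len(l) + 1)]
    assert len(out) == len(l)
    return out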
Code example #10
def run(s):

    print s
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)
    #print folder

    # load the dataset
    eval_options = []
    if s['dataset'] == 'atis':
        train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    if s['dataset'] == 'ner':
        train_set, valid_set, test_set, dic = load.ner()
    if s['dataset'] == 'chunk':
        train_set, valid_set, test_set, dic = load.chunk()
    if s['dataset'] == 'pos':
        train_set, valid_set, test_set, dic = load.pos()
        eval_options = ['-r']

    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())



    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    vocsize = len(dic['words2idx'])
    nclasses = len(dic['labels2idx'])
    nsentences = len(train_lex)

    wv = None
    if 'WVFolderName' in s:
        # load word vector
        # wv = numpy.zeros((vocsize+1, s['emb_dimension']))
        # input = open(s['wv_folder'] + str(s['emb_dimension']), 'r')
        # for line in input:
        #     tokens = line.split(' ')
        #     wv[int(tokens[0])] = [float(tokens[j]) for j in xrange(1, len(tokens) - 1)]

        # load word vector
        wvnp = np.load("./../WV/" + s['WVFolderName'] + "/" + s['model']+".words" + str(s['emb_dimension']) + ".npy")
        # load vocab
        with open("./../WV/" + s['WVFolderName'] + "/" + s['model']+".words" + str(s['emb_dimension']) + ".vocab") as f:
            vocab = [line.strip() for line in f if len(line) > 0]
        wi = dict([(a, i) for i, a in enumerate(vocab)])
        iw = vocab
        wv = numpy.zeros((vocsize + 1, s['emb_dimension']))
        random_v = math.sqrt(6.0 / numpy.sum(s['emb_dimension'])) * numpy.random.uniform(-1.0, 1.0, (s['emb_dimension']))

        miss = 0
        for i in range(0, vocsize):
            word = idx2word[i]
            if word in wi:
                wv[i] = wvnp[wi[word]]
                # print wvnp[wi[word]]
            else:
                wv[i] = random_v
                miss += 1
        print miss, '/', vocsize

    best_valid = numpy.zeros(len(s['rho'])) - numpy.inf
    best_test = numpy.zeros(len(s['rho'])) - numpy.inf
    test_f1List = [[] for _ in s['rho']]  # one test-F1 trace per rho value
    # print 111
    # print test_f1List


    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])
    rnn = elman_attention.model(nh=s['nhidden'],
                                    nc=nclasses,
                                    ne=vocsize,
                                    de=s['emb_dimension'],
                                    attention=s['attention'],
                                    h_win=s['h_win'],
                                    lvrg=s['lvrg'],
                                    wv=wv)



    # train with early stopping on validation set
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i])
            labels = train_y[i]

            # for j in xrange(len(words)):
            #    if j >= 2 :
            #        rnn.train(words[j], [labels[j-2], labels[j-1], labels[j]], s['clr'])

            nl, aaL = rnn.train(cwords, labels, s['dropRate'], 1)
            #if i % 1 == 0:
            #    print aaL
            # rnn.normalize()
            if s['verbose']:
                sys.stdout.write('\r[learning] epoch %i >> %2.2f%%' % (e, (i + 1) * 100. / nsentences)
                                 + '  estimated total %.2f (min) <<' % ((time.time() - tic) / 60 / (i + 1) * nsentences)
                                 + ' completed in %.2f (sec) <<' % (time.time() - tic))
                sys.stdout.flush()

        print 'start test', time.time() / 60
        # print avgSentenceLength / (nsentences)
        # evaluation // back into the real world : idx -> words

        print 'start pred train', time.time() / 60
        predictions_train = [[map(lambda variable: idx2label[variable], w)\
                              for w in rnn.classify(numpy.asarray(contextwin(x)).astype('int32'), s['dropRate'], 0, s['rho'])]
                             for x in train_lex]
        groundtruth_train = [map(lambda x: idx2label[x], y) for y in train_y]
        words_train = [map(lambda x: idx2word[x], w) for w in train_lex]

        #print 'start pred test', time.time() / 60
        predictions_test = [[map(lambda variable: idx2label[variable], w)\
                             for w in rnn.classify(numpy.asarray(contextwin(x)).astype('int32'), s['dropRate'], 0, s['rho'])]
                            for x in test_lex]
        groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]
        words_test = [map(lambda x: idx2word[x], w) for w in test_lex]

        #print 'start pred valid', time.time() / 60
        predictions_valid = [[map(lambda variable: idx2label[variable], w)\
                              for w in rnn.classify(numpy.asarray(contextwin(x)).astype('int32'), s['dropRate'], 0, s['rho'])]
                             for x in valid_lex]
        groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]
        words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]

        #print 'end pred, start eval', time.time() / 60

        # evaluation // compute the accuracy using conlleval.pl
        for i_rho in xrange(len(s['rho'])):
            ptrain = [p[i_rho] for p in predictions_train]
            ptest = [p[i_rho] for p in predictions_test]
            pvalid = [p[i_rho] for p in predictions_valid]


            res_train = conlleval(ptrain, groundtruth_train, words_train, folder + '/current.train.txt' + str(s['seed']), eval_options)
            res_test = conlleval(ptest, groundtruth_test, words_test, folder + '/current.test.txt' + str(s['seed']), eval_options)
            res_valid = conlleval(pvalid, groundtruth_valid, words_valid, folder + '/current.valid.txt' + str(s['seed']), eval_options)

            print '                                     epoch', e, ' rho ', i_rho, \
                '  train p', res_train['p'], 'valid p', res_valid['p'], \
                '  train r', res_train['r'], 'valid r', res_valid['r'], \
                '  train F1', res_train['f1'], 'valid F1', res_valid['f1'], \
                'test F1', res_test['f1'], ' ' * 20

            test_f1List[i_rho].append(res_test['f1'])

            if res_valid['f1'] > best_valid[i_rho]:
                best_valid[i_rho] = res_valid['f1']
                best_test[i_rho] = res_test['f1']
        for i_rho in xrange(len(s['rho'])):
            print i_rho, s['dataset'],
            if s['model'] == 'glove':
                print s['WVFolderName'].replace('skip', 'glove'),
            else:
                print s['WVFolderName'],
            for iff1 in test_f1List[i_rho]:
                print iff1,
            print ''
        for i_rho in xrange(len(s['rho'])):
            print s['rho'][i_rho], ' ', best_valid[i_rho] , '/' , best_test[i_rho]

        #print 'end eval', time.time() / 60


    print 'BEST RESULT: epoch', e, 'best valid F1 per rho', best_valid, 'best test F1 per rho', best_test, 'with the model', folder
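
shuffle in this example (and in example #9) must reorder the three training lists in lockstep; a minimal sketch matching that usage, assuming the is13-style in-place shuffle:

import random

def shuffle(list_of_lists, seed):
    # Shuffle each list in place with the same seed so that sentences,
    # named entities, and labels stay aligned across lists.
    for l in list_of_lists:
        random.seed(seed)
        random.shuffle(l)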