Code Example #1
                ne=vocsize,
                de=s['emb_dimension'],
                cs=s['win'])

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words = map(lambda x: numpy.asarray(x).astype('int32'), \
                        minibatch(cwords, s['bs']))
            labels = train_y[i]

            for word_batch, label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s['clr'])
                rnn.normalize()

            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%' % (
                    e, (i + 1) * 100. / nsentences
                ), 'completed in %.2f (sec) <<\r' % (time.time() - tic),
                sys.stdout.flush()

        # evaluation // back into the real world : idx -> words
        predictions_test = [map(lambda x: idx2label[x], \
                                rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32'))) \
Code Example #2
File: elman-forward.py Project: aadamson/is13
                    ne = vocsize,
                    de = s['emb_dimension'],
                    cs = s['win'] )

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            labels = train_y[i]
            for word_batch , label_last_word in zip(words, labels):
                print "word_batch: ", word_batch
                print "label_last_word: ", label_last_word
                rnn.train(word_batch, label_last_word, s['clr'])
                rnn.normalize()
            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
                sys.stdout.flush()
            
        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
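
All of these snippets build their training input with two helpers from the is13 utilities, contextwin and minibatch. As a minimal sketch of what they likely do, inferred from how they are called above (the exact implementations in each fork may differ): contextwin pads the sentence with the index -1, assumed to map to a dedicated padding row of the embedding matrix, and returns one window of win word indices per position; minibatch returns, for each position, the up-to-bs most recent context windows, which is why label_last_word is the label of the last word in word_batch.

def contextwin(l, win):
    # one window of `win` word indices per position in the sentence;
    # out-of-range positions are padded with -1 (a dedicated padding index)
    assert win % 2 == 1 and win >= 1
    l = list(l)
    lpadded = (win // 2) * [-1] + l + (win // 2) * [-1]
    return [lpadded[i:i + win] for i in xrange(len(l))]

def minibatch(l, bs):
    # for each position, the up-to-`bs` most recent context windows,
    # e.g. [0,1,2,3] with bs=3 -> [[0], [0,1], [0,1,2], [1,2,3]]
    out = [l[:i] for i in xrange(1, min(bs, len(l)) + 1)]
    out += [l[i - bs:i] for i in xrange(bs + 1, len(l) + 1)]
    assert len(out) == len(l)
    return out
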
Code Example #3
File: elman-forward-cue.py Project: ffancellu/is13
    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex,train_y,train_cue], s['seed'])
        print '[learning] epoch %d' % e
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            # take the context win of both
            # merge the results
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            if args.c:
                ccues = contextwin(train_cue[i],s['win'])
                cues_bs = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(ccues, s['bs']))  
            labels = train_y[i]
            if not args.c:
                for word_batch , label_last_word in zip(words, labels):
                    rnn.train(word_batch, label_last_word, s['clr'])
                    rnn.normalize()
            else:
                for word_batch , cues_batch, label_last_word in zip(words, cues_bs,labels):
                    rnn.train(word_batch, cues_batch,label_last_word, s['clr'])
                    rnn.normalize()
                    rnn.normalize_cue()
            if s['verbose']:
Code Example #4
File: jordan-forward.py Project: wqren/is13
    # instantiate the model
    numpy.random.seed(s["seed"])
    random.seed(s["seed"])
    rnn = model(nh=s["nhidden"], nc=nclasses, ne=vocsize, de=s["emb_dimension"], cs=s["win"])

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s["clr"] = s["lr"]
    for e in xrange(s["nepochs"]):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s["seed"])
        s["ce"] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s["win"])
            words = map(lambda x: numpy.asarray(x).astype("int32"), minibatch(cwords, s["bs"]))
            labels = train_y[i]

            for word_batch, label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s["clr"])
                rnn.normalize()

            if s["verbose"]:
                print "[learning] epoch %i >> %2.2f%%" % (
                    e,
                    (i + 1) * 100.0 / nsentences,
                ), "completed in %.2f (sec) <<\r" % (time.time() - tic),
                sys.stdout.flush()

        # evaluation // back into the real world : idx -> words
        predictions_test = [
Code Example #5
File: elman-forward.py Project: npow/is13
                    cs = s.win,
                    memory_size = s.memory_size,
                    n_memory_slots = s.n_memory_slots )

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s.clr = s.lr
    for e in xrange(s.n_epochs):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s.seed)
        s.ce = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s.win)
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s.bs))
            labels = train_y[i]
            for word_batch , label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s.clr)
                rnn.normalize()
            if s.verbose:
                print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
                sys.stdout.flush()
            
        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s.win)).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
        words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]
Code Example #6
File: elman-forward.py Project: evilmucedin/is13
                    nc = nclasses,
                    ne = vocsize,
                    de = s['emb_dimension'],
                    cs = s['win'] )

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'), minibatch(cwords, s['bs']))
            labels = train_y[i]
            for word_batch, label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s['clr'])
                rnn.normalize()
            if s['verbose']:
                print('[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic), end="")
                sys.stdout.flush()

        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
        words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]
Code Example #7
File: main.py Project: wavelets/RNN-EM
                cs=s.win,
                memory_size=s.memory_size,
                n_memory_slots=s.n_memory_slots)

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s.clr = s.lr
    for e in xrange(s.n_epochs):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s.seed)
        s.ce = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s.win)
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s.bs))
            labels = train_y[i]
            for word_batch, label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s.clr)
                rnn.normalize()
            if s.verbose:
                print '[learning] epoch %i >> %2.2f%%' % (
                    e, (i + 1) * 100. / nsentences
                ), 'completed in %.2f (sec) <<\r' % (time.time() - tic),
                sys.stdout.flush()

        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s.win)).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]
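
Note that examples #5 and #7 read the settings as attributes (s.win, s.clr = s.lr) rather than dict keys (s['win']) like the other forks. A minimal sketch of what that implies, assuming s is an argparse.Namespace or a similar attribute container; the field values below are illustrative, not taken from those projects:

from argparse import Namespace

# attribute-style settings container; new fields like `clr` and `ce`
# can be attached on the fly, exactly as the training loop does
s = Namespace(win=5, bs=9, lr=0.0627142536696559, seed=345,
              n_epochs=50, verbose=1, memory_size=40, n_memory_slots=8)
s.clr = s.lr
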
Code Example #8
def callRNN():

    s = {'reload':False,
         'model':'the path of the model',
         'isemb':True,
         'lr':0.0627142536696559,
         'verbose':1,
         'decay':True, # decay on the learning rate if improvement stops
         'win':5, # number of words in the context window
         'bs':9, # number of backprop through time steps
         'nhidden':100, # number of hidden units
         'seed':345,
         'emb_dimension':100, # dimension of word embedding
         'nepochs':20}
         
    
    # get the current file name (used as the output folder)
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset: train set, valid set, test set, dictionaries
    train_set, valid_set, test_set, dic = pp.preProcess(segfile, labelfile, embfile)

    #train_set, valid_set, test_set, dic = load.atisfold(s['fold'])

    # dic contains a label dictionary and a word dictionary: word -> index, index -> word
    idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
    idx2word  = dict((k,v) for v,k in dic['words2idx'].iteritems())
    
    # the same file is processed and then split; the split is not done here yet
    # each set holds indices, per-line counts, and indices; the training set is split 4:1 into train and valid
    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex,  test_ne,  test_y  = test_set

    #vocsize = len(set(reduce(\
    #                   lambda x, y: list(x)+list(y),\
    #                   train_lex+valid_lex+test_lex)))
    # number of classes (how many label types); this could also be assigned directly
    nclasses = len(idx2word)
    
    # number of training sentences, used to iterate over the corpus and track progress
    nsentences = len(train_lex) 

    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])

    # initialize the model parameters
    print 'init model'
    rnn = model(    nh = s['nhidden'],
                    nc = nclasses,
                    ne = 1,
                    isemb = s['isemb'],
                    de = s['emb_dimension'],
                    cs = s['win'] )

    if s['reload']:
        print 'load model'
        rnn.load(s['model'])
    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    print 'start train'
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()

        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            labels = train_y[i]

            for word_batch , label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s['clr'])  # start training
                rnn.normalize()

            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
                sys.stdout.flush()
            
        # evaluation // back into the real world : idx -> words
        # tune on the validation set, mainly the learning rate

        # run the test set through the model and convert predictions to string labels
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]

        # replace the test_y indices with string labels
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]

        # replace the test_lex indices with the words themselves
        words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]

        # run the validation set through the model and convert predictions to string labels
        predictions_valid = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in valid_lex ]

        # replace the validation labels with string labels
        groundtruth_valid = [ map(lambda x: idx2label[x], y) for y in valid_y ]

        # replace the valid_lex indices with words
        words_valid = [ map(lambda x: idx2word[x], w) for w in valid_lex]

        # evaluation // compute the accuracy using conlleval.pl
        # call conlleval.pl to score the test and valid sets and save the results
        res_test  = conlleval(predictions_test, groundtruth_test, words_test, folder +'/test'+str(e)+'.txt')
        res_valid = conlleval(predictions_valid, groundtruth_valid, words_valid, folder + '/valid'+str(e)+'.txt')

        # save the model
        if not os.path.exists('result'): os.mkdir('result')
        rnn.save('result/'+folder+str(e))

        # compare validation F1 against the best so far
        print 'epoch', e, 'test F1:', res_test['f1'], 'valid F1:', res_valid['f1']
        if res_valid['f1'] > best_f1:            
            best_f1 = res_valid['f1']
            if s['verbose']:
                print 'NEW BEST: epoch', e, 'valid F1', res_valid['f1'], 'best test F1', res_test['f1'], ' '*20
            s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid['p'], res_valid['r']
            s['tf1'], s['tp'], s['tr'] = res_test['f1'],  res_test['p'],  res_test['r']
            s['be'] = e
            # spawn a subprocess to run mv, i.e. rename the result files
            subprocess.call(['mv', folder + '/test'+str(e)+'.txt', folder + '/best.test'+str(e)+'.txt'])
            subprocess.call(['mv', folder + '/valid'+str(e)+'.txt', folder + '/best.valid'+str(e)+'.txt'])
        else:
            print ''
        
        # learning rate decay if no improvement in 5 epochs
        if s['decay'] and abs(s['be']-s['ce']) >= 5: 
            s['clr'] *= 0.5
            print 'learning rate decayed to', s['clr']
        if s['clr'] < 1e-5: break

    print 'BEST RESULT: epoch', e, 'valid F1', s['vf1'], 'best test F1', s['tf1'], 'with the model', folder
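
The shuffle call at the top of each epoch permutes several parallel lists (sentences, entity tags, labels) and must keep them aligned. A sketch consistent with that requirement and with the is13 utilities, reseeding the generator before each list so every list gets the same permutation:

import random

def shuffle(lol, seed):
    # shuffle each list of lol in place with the same seed, so that
    # the parallel lists end up permuted identically and stay aligned
    for l in lol:
        random.seed(seed)
        random.shuffle(l)
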
Code Example #9
def callRNN():

    s = {
        'reload': False,
        'model': 'the path of the model',
        'isemb': True,
        'lr': 0.0627142536696559,
        'verbose': 1,
        'decay': True,  # decay on the learning rate if improvement stops
        'win': 5,  # number of words in the context window
        'bs': 9,  # number of backprop through time steps
        'nhidden': 100,  # number of hidden units
        'seed': 345,
        'emb_dimension': 100,  # dimension of word embedding
        'nepochs': 20
    }

    # get the current file name (used as the output folder)
    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset: train set, valid set, test set, dictionaries
    train_set, valid_set, test_set, dic = pp.preProcess(
        segfile, labelfile, embfile)

    #train_set, valid_set, test_set, dic = load.atisfold(s['fold'])

    # dic contains a label dictionary and a word dictionary: word -> index, index -> word
    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())

    # the same file is processed and then split; the split is not done here yet
    # each set holds indices, per-line counts, and indices; the training set is split 4:1 into train and valid
    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set

    #vocsize = len(set(reduce(\
    #                   lambda x, y: list(x)+list(y),\
    #                   train_lex+valid_lex+test_lex)))
    # number of classes (how many label types); this could also be assigned directly
    nclasses = len(idx2word)

    # number of training sentences, used to iterate over the corpus and track progress
    nsentences = len(train_lex)

    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])

    # initialize the model parameters
    print 'init model'
    rnn = model(nh=s['nhidden'],
                nc=nclasses,
                ne=1,
                isemb=s['isemb'],
                de=s['emb_dimension'],
                cs=s['win'])

    if s['reload']:
        print 'load model'
        rnn.load(s['model'])
    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    print 'start train'
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()

        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            labels = train_y[i]

            for word_batch, label_last_word in zip(words, labels):
                rnn.train(word_batch, label_last_word, s['clr'])  # start training
                rnn.normalize()

            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%' % (
                    e, (i + 1) * 100. / nsentences
                ), 'completed in %.2f (sec) <<\r' % (time.time() - tic),
                sys.stdout.flush()

        # evaluation // back into the real world : idx -> words
        # tune on the validation set, mainly the learning rate

        # run the test set through the model and convert predictions to string labels
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]

        # replace the test_y indices with string labels
        groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]

        # replace the test_lex indices with the words themselves
        words_test = [map(lambda x: idx2word[x], w) for w in test_lex]

        # run the validation set through the model and convert predictions to string labels
        predictions_valid = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in valid_lex ]

        # replace the validation labels with string labels
        groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]

        # replace the valid_lex indices with words
        words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]

        # evaluation // compute the accuracy using conlleval.pl
        # call conlleval.pl to score the test and valid sets and save the results
        res_test = conlleval(predictions_test, groundtruth_test, words_test,
                             folder + '/test' + str(e) + '.txt')
        res_valid = conlleval(predictions_valid, groundtruth_valid,
                              words_valid, folder + '/valid' + str(e) + '.txt')

        # save the model
        if not os.path.exists('result'): os.mkdir('result')
        rnn.save('result/' + folder + str(e))

        # compare validation F1 against the best so far
        print 'epoch', e, 'test F1:', res_test['f1'], 'valid F1:', res_valid['f1']
        if res_valid['f1'] > best_f1:
            best_f1 = res_valid['f1']
            if s['verbose']:
                print 'NEW BEST: epoch', e, 'valid F1', res_valid[
                    'f1'], 'best test F1', res_test['f1'], ' ' * 20
            s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid[
                'p'], res_valid['r']
            s['tf1'], s['tp'], s['tr'] = res_test['f1'], res_test[
                'p'], res_test['r']
            s['be'] = e
            # spawn a subprocess to run mv, i.e. rename the result files
            subprocess.call([
                'mv', folder + '/test' + str(e) + '.txt',
                folder + '/best.test' + str(e) + '.txt'
            ])
            subprocess.call([
                'mv', folder + '/valid' + str(e) + '.txt',
                folder + '/best.valid' + str(e) + '.txt'
            ])
        else:
            print ''

        # learning rate decay if no improvement in 5 epochs
        if s['decay'] and abs(s['be'] - s['ce']) >= 5:
            s['clr'] *= 0.5
            print 'learning rate decayed to', s['clr']
        if s['clr'] < 1e-5: break

    print 'BEST RESULT: epoch', e, 'valid F1', s['vf1'], 'best test F1', s[
        'tf1'], 'with the model', folder
Code Example #10
File: deep-example.py Project: aadamson/is13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        help='Adjust level of verbosity.')
    parser.add_argument('-nh', '--num-hidden', dest='num_hidden', type=int, default=100,
                        help='Set dimension of hidden units.')
    parser.add_argument('-w', '--window', type=int, default=5,
                        help='Set size of context window (in words).')
    parser.add_argument('-d', '--depth', type=int, default=3,
                        help='Set number of stacked layers')
    parser.add_argument('--seed', type=int, default=345,
                        help='Set PRNG seed')
    parser.add_argument('--emb-dim', dest='emb_dimension', type=int, default=100,
                        help='Set size of word embeddings')
    parser.add_argument('-e', '--num-epochs', dest='num_epochs', type=int, default=50,
                        help='Set number of epochs to train')

    args = parser.parse_args()

    s = {'fold':3, # 5 folds 0,1,2,3,4
         'lr':0.0627142536696559,
         'verbose': args.verbose,
         'decay': False, # decay on the learning rate if improvement stops
         'win': args.window, # number of words in the context window
         'bs':9, # number of backprop through time steps
         'nhidden': args.num_hidden, # number of hidden units
         'depth': args.depth, # number of layers in space
         'seed': args.seed,
         'emb_dimension': args.emb_dimension, # dimension of word embedding
         'nepochs': args.num_epochs}

    folder = os.path.basename(__file__).split('.')[0]
    if not os.path.exists(folder): os.mkdir(folder)

    # load the dataset
    train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
    idx2label = dict((k,v) for v,k in dic['labels2idx'].iteritems())
    idx2word  = dict((k,v) for v,k in dic['words2idx'].iteritems())

    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex,  test_ne,  test_y  = test_set

    vocsize = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_lex+valid_lex+test_lex)))

    nclasses = len(set(reduce(\
                       lambda x, y: list(x)+list(y),\
                       train_y+test_y+valid_y)))
    
    nsentences = len(train_lex)

    # instantiate the model
    numpy.random.seed(s['seed'])
    random.seed(s['seed'])
    rnn = model(    nh = s['nhidden'],
                    nc = nclasses,
                    ne = vocsize,
                    de = s['emb_dimension'],
                    cs = s['win'],
                    depth = s['depth'] )

    # train with early stopping on validation set
    best_f1 = -numpy.inf
    s['clr'] = s['lr']
    for e in xrange(s['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], s['seed'])
        s['ce'] = e
        tic = time.time()
        for i in xrange(nsentences):
            cwords = contextwin(train_lex[i], s['win'])
            words  = map(lambda x: numpy.asarray(x).astype('int32'),\
                         minibatch(cwords, s['bs']))
            labels = train_y[i]
            for word_batch, label_last_word in zip(words, labels):
                print word_batch
                #print label_last_word
                #pdb.set_trace()
                rnn.train(word_batch, label_last_word, s['clr'])
                rnn.normalize()
            if s['verbose']:
                print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic),
                sys.stdout.flush()
            
        # evaluation // back into the real world : idx -> words
        predictions_test = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in test_lex ]
        groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
        words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]

        predictions_valid = [ map(lambda x: idx2label[x], \
                             rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
                             for x in valid_lex ]
        groundtruth_valid = [ map(lambda x: idx2label[x], y) for y in valid_y ]
        words_valid = [ map(lambda x: idx2word[x], w) for w in valid_lex]

        # evaluation // compute the accuracy using conlleval.pl
        res_test  = conlleval(predictions_test, groundtruth_test, words_test, folder + '/current.test.txt')
        res_valid = conlleval(predictions_valid, groundtruth_valid, words_valid, folder + '/current.valid.txt')

        if res_valid['f1'] > best_f1:
            rnn.save(folder)
            best_f1 = res_valid['f1']
            if s['verbose']:
                print 'NEW BEST: epoch', e, 'valid F1', res_valid['f1'], 'best test F1', res_test['f1'], ' '*20
            s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid['p'], res_valid['r']
            s['tf1'], s['tp'], s['tr'] = res_test['f1'],  res_test['p'],  res_test['r']
            s['be'] = e
            subprocess.call(['mv', folder + '/current.test.txt', folder + '/best.test.txt'])
            subprocess.call(['mv', folder + '/current.valid.txt', folder + '/best.valid.txt'])
        else:
            print ''
        
        # learning rate decay if no improvement in 10 epochs
        if s['decay'] and abs(s['be']-s['ce']) >= 10: s['clr'] *= 0.5 
        if s['clr'] < 1e-5: break

    print 'BEST RESULT: epoch', e, 'valid F1', s['vf1'], 'best test F1', s['tf1'], 'with the model', folder
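
All of the examples score an epoch by handing per-sentence predictions, gold labels, and words to a conlleval wrapper that returns a dict with at least 'p', 'r', and 'f1'. A minimal sketch of that interface, assuming it writes the standard three-column CoNLL format and shells out to the official conlleval.pl script; the wrapper actually bundled with is13 may differ in its details:

import subprocess

def conlleval(predictions, groundtruth, words, filename):
    # dump 'word gold predicted' triples, one blank-line-separated
    # block per sentence, in the format conlleval.pl expects
    with open(filename, 'w') as f:
        for sw, sg, sp in zip(words, groundtruth, predictions):
            for w, g, p in zip(sw, sg, sp):
                f.write('%s %s %s\n' % (w, g, p))
            f.write('\n')
    # conlleval.pl prints a summary whose second line looks like:
    #   accuracy:  98.0%; precision:  95.0%; recall:  94.0%; FB1:  94.5
    out = subprocess.check_output(['perl', 'conlleval.pl'],
                                  stdin=open(filename))
    stats = out.split('\n')[1]
    p, r, f1 = [float(field.split()[-1].rstrip('%'))
                for field in stats.split(';')[1:]]
    return {'p': p, 'r': r, 'f1': f1}
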