Example #1
0
def Output_Result(doc4test):
    """Score *doc4test* with MUC, B-cubed and CEAF.

    Returns a dict mapping metric key ("muc", "b3", "ceaf") to a
    (recall, precision, f1) tuple, as returned by
    evaluation.evaluate_documents (which yields precision, recall, f1).
    """
    metric_table = (("muc", evaluation.muc),
                    ("b3", evaluation.b_cubed),
                    ("ceaf", evaluation.ceafe))
    metrics = {}
    for key, metric_fn in metric_table:
        precision, recall, f1 = evaluation.evaluate_documents(doc4test,
                                                              metric_fn)
        # Stored order is (recall, precision, f1).
        metrics[key] = (recall, precision, f1)
    return metrics
def get_reward(cluster_info, gold_info, max_cluster_num):
    """Return the B-cubed F1 of the clustering described by *cluster_info*
    against *gold_info* (used as the scalar reward signal)."""
    document = get_evaluation_document(cluster_info, gold_info,
                                       max_cluster_num)
    precision, recall, f1 = evaluation.evaluate_documents(
        [document], evaluation.b_cubed)
    return f1
Example #3
0
def get_reward_average(cluster_info, gold_info, max_cluster_num, index,
                       max_cluster_index):
    """Score all candidate cluster assignments of the mention at *index*.

    For every candidate cluster id in ``range(max_cluster_index)`` the
    mention at position *index* is re-assigned to that cluster; the
    resulting documents are evaluated jointly and the combined B-cubed
    F1 is returned.
    """
    prefix = cluster_info[:index]
    postfix = cluster_info[index + 1:]

    documents = [
        get_evaluation_document(prefix + [candidate] + postfix, gold_info,
                                max_cluster_num)
        for candidate in range(max_cluster_index)
    ]

    precision, recall, f1 = evaluation.evaluate_documents(
        documents, evaluation.b_cubed)
    return f1
Example #4
0
def compute_metrics(docs, prefix):
    """Evaluate *docs* with MUC, B-cubed, CEAFe and LEA.

    Returns a dict keyed by ``prefix + ' <metric>'`` (f1) with additional
    ``' precision'`` / ``' recall'`` entries per metric and a
    ``prefix + ' conll'`` average.  Python 2 code (print statement).
    """
    results = {}
    # Metric names carry a leading space so 'prefix + name' reads as
    # "<prefix> <metric>".
    for name, metric in [(' muc', evaluation.muc), (' b3', evaluation.b_cubed),
                         (' ceafe', evaluation.ceafe), (' lea', evaluation.lea)]:
        p, r, f1 = evaluation.evaluate_documents(docs, metric)
        results[prefix + name] = f1
        results[prefix + name + ' precision'] = p
        results[prefix + name + ' recall'] = r
    muc, b3, ceafe, lea = \
        results[prefix + ' muc'], results[prefix + ' b3'], results[prefix + ' ceafe'], results[prefix + ' lea']
    # CoNLL score: mean of MUC, B-cubed and CEAFe (LEA is reported but
    # not part of the average).
    conll = (muc + b3 + ceafe) / 3
    print "{:} - MUC: {:0.2f} - B3: {:0.2f} - CEAFE: {:0.2f} - LEA {:0.2f} - CoNLL {:0.2f}".format(
        prefix, 100 * muc, 100 * b3, 100 * ceafe, 100 * lea, 100 * conll)
    results[prefix + ' conll'] = conll
    return results
Example #5
0
def compute_metrics(docs, prefix):
    """Evaluate *docs* with MUC, B-cubed, CEAFe and LEA.

    Returns a dict keyed by ``prefix + ' <metric>'`` (f1) with additional
    ``' precision'`` / ``' recall'`` entries per metric and a
    ``prefix + ' conll'`` average.  Python 2 code (print statement).
    """
    results = {}
    # Metric names carry a leading space so 'prefix + name' reads as
    # "<prefix> <metric>".
    for name, metric in [(' muc', evaluation.muc), (' b3', evaluation.b_cubed),
                         (' ceafe', evaluation.ceafe),
                         (' lea', evaluation.lea)]:
        p, r, f1 = evaluation.evaluate_documents(docs, metric)
        results[prefix + name] = f1
        results[prefix + name + ' precision'] = p
        results[prefix + name + ' recall'] = r
    muc, b3, ceafe, lea = \
        results[prefix + ' muc'], results[prefix + ' b3'], results[prefix + ' ceafe'], results[prefix + ' lea']
    # CoNLL score: mean of MUC, B-cubed and CEAFe (LEA is reported but
    # not part of the average).
    conll = (muc + b3 + ceafe) / 3
    print "{:} - MUC: {:0.2f} - B3: {:0.2f} - CEAFE: {:0.2f} - LEA {:0.2f} - CoNLL {:0.2f}".format(
        prefix, 100 * muc, 100 * b3, 100 * ceafe, 100 * lea, 100 * conll)
    results[prefix + ' conll'] = conll
    return results
Example #6
0
def main():
    """Load a pretrained coreference policy network, report MUC/B-cubed/CEAF
    on a small sample of training documents, then fine-tune it with
    REINFORCE-style policy-gradient updates.

    Python 2 script (print statements, file(), cPickle); depends on
    module-level args, word2vec, DataGenerate, policy_network, evaluation.
    NOTE(review): this snippet is truncated — it ends in an unterminated
    triple-quoted string.
    """

    embedding_dir = args.embedding+args.language

    print >> sys.stderr,"Read Embedding from %s ..."%embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64  # Chinese embeddings use a different size
    w2v = word2vec.Word2Vec(embedding_dir,embedding_dimention)

    #network_model
    # Hard-coded checkpoint path; rejected alternatives kept below.
    net_dir = "./model/pretrain_ana/network_model_pretrain.cn.3"
    #net_dir = "./model/pretrain/network_model_pretrain.cn.10"
    #net_dir = "./model/nets/network_model.cn.1"
    #net_dir = './model/network_model.cn'
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
    # NOTE(review): message names ./model/network_model.<lang> but the model
    # is actually loaded from net_dir above.
    print >> sys.stderr,"Read model from ./model/network_model."+args.language
    read_f = file(net_dir, 'rb')  # NOTE(review): never closed
    network_model = cPickle.load(read_f)
    #network_model = network.NetWork(1738,855,1000)

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")
    
    MAX=5  # cap on documents used for both the quick test and training

    train4test = [] # add 5 items for testing the training performance
    ## test performance after pretraining
    dev_docs_for_test = []
    num = 0
    # Despite the name, this samples from the TRAIN documents (see the
    # commented dev alternative below).
    for cases,gold_chain in DataGenerate.case_generater(train_docs,"train",w2v):
    #for cases,gold_chain in DataGenerate.case_generater(dev_docs,"dev",w2v):
        ev_doc = policy_network.generate_policy_test(cases,gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
        train4test.append((cases,gold_chain))
        num += 1
        if num >= MAX:
            break

    print "Performance on DATA after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "#################################################" 
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"

    ##train
    add2train = True  # NOTE(review): set but never read in the visible code

    # Hyper-parameters; rejected values kept as comments.
    ran_p = 0.0  # exploration probability (0.0 here, halved each epoch)
    l2_lambda = 0.0000003
    #l2_lambda = 0.0001
    lr = 0.0002
    #lr = 0.0
    #lr = 0.0001
    #ce_lmbda = 0.1
    ce_lmbda = 0.0

    for echo in range(50):
        start_time = timeit.default_timer()
        cost_this_turn = 0.0
        average_reward = 0.0
        done_case_num = 0

        #for cases,gold_chain in DataGenerate.case_generater_trick(train_docs,"train",w2v):
        for cases,gold_chain in DataGenerate.case_generater(train_docs,"train",w2v):
            #for single_mention_array,train_list,lable_list in pretrain.generate_pretrain_case(cases,gold_chain,network_model):
            #    print lable_list


            this_reward = 0.0

            reward_baseline = []  # sliding window of recent rewards
    
            zero_num = 0  # NOTE(review): never incremented or read

            for single, train, action, reward in policy_network.generate_policy_case(cases,gold_chain,network_model,ran_p):
            #for single, train, action, reward , acp in policy_network.generate_policy_case_trick(cases,gold_chain,network_model,ran_p):


                # Baseline: mean of the rewards seen so far in this document.
                reward_b = 0 if len(reward_baseline) < 1 else float(sum(reward_baseline))/float(len(reward_baseline))

                # NOTE(review): norm_reward is computed but never used —
                # train_step below is fed the raw reward.
                norm_reward = reward - reward_b if reward > reward_b else 0.00001

                this_reward = reward

                this_cost = network_model.train_step(single,train,action,reward,lr,l2_lambda,ce_lmbda,0.0)[0]
                #this_cost = network_model.train_step(single,train,action,norm_reward,lr,l2_lambda,ce_lmbda)[0]
                #print reward,this_cost
                cost_this_turn += this_cost

                #print this_cost,acp,reward
                #print this_cost
                reward_baseline.append(this_reward)
                if len(reward_baseline) >= 32:
                    # NOTE(review): typo — assigns to 'reward_baselin', so the
                    # baseline window is never actually truncated to 32.
                    reward_baselin = reward_baseline[1:]

            average_reward += this_reward
            done_case_num += 1

            if done_case_num >= MAX:
                break

        print network_model.get_weight_sum()
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:",cost_this_turn
        print >> sys.stderr, "Average Reward:",average_reward/float(done_case_num)
        print >> sys.stderr, "TRAINING Use %.3f seconds"%(end_time-start_time)
        ran_p = ran_p*0.5  # decay exploration probability each epoch
        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()

        for train_cases,train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(train_cases,train_doc_gold_chain,network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **"%echo
        print "TRAIN"
        mp,mr,mf = evaluation.evaluate_documents(train_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(train_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(train_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print
        sys.stdout.flush()

        # NOTE(review): unterminated triple-quote — the remainder of this
        # function was cut off in this snippet.
        '''
Example #7
0
def get_reward(cluster_info, gold_info, max_cluster_num):
    """Return (precision, recall, f1) under B-cubed for the clustering
    described by *cluster_info* against *gold_info*."""
    document = get_evaluation_document(cluster_info, gold_info,
                                       max_cluster_num)
    precision, recall, f1 = evaluation.evaluate_documents(
        [document], evaluation.b_cubed)
    return precision, recall, f1
Example #8
0
def main():
    """Pretrain the coreference network: first anaphoricity/mention-pair
    steps, then joint pair pretraining, reporting MUC/B-cubed/CEAF on a
    small dev sample after every epoch.

    Python 2 script (print statements, file(), cPickle); depends on
    module-level args, word2vec, DataGenerate, pretrain, policy_network,
    network, evaluation.
    """

    embedding_dir = args.embedding + args.language

    print >> sys.stderr, "Read Embedding from %s ..." % embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64  # Chinese embeddings use a different size
    w2v = word2vec.Word2Vec(embedding_dir, embedding_dimention)

    #network_model
    #net_dir = "./model/pretrain/network_model_pretrain.cn.19"
    #net_dir = "./model/pretrain_manu_dropout/network_model_pretrain.cn.10"
    # Resume from a saved model if present; otherwise build and save a
    # fresh one.
    if os.path.isfile("./model/network_model." + args.language):
        read_f = file('./model/network_model.' + args.language, 'rb')  # NOTE(review): never closed
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.cn.best', 'rb')
        #read_f = file(net_dir, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model." + args.language
    else:
        # Feature dimensions differ per language (defaults are for "cn").
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention, single_dimention, 1000)
        print >> sys.stderr, "save model ..."
        save_f = file('./model/network_model.' + args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    #pretrain
    # Hyper-parameters for the pretraining phases.
    l2_lambda = 0.0000001
    lr = 0.03
    ce_lambda = 0.0001
    dropout_rate = 0.2

    print "Weight Sum", network_model.get_weight_sum()

    times = 0  # NOTE(review): never used after this point
    # Phase 1: separate anaphoricity / mention-pair training steps.
    #for echo in range(11,40):
    for echo in range(10):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        done_num = 0
        pos_num = 0
        neg_num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue  # skip very large documents
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):

                #cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,lable_list,lr,l2_lambda,dropout_rate)[0]

                # lable_list[0] == 1 marks the "no antecedent" case here
                # (counted as negative).
                if lable_list[0] == 1:
                    neg_num += 1
                    ana_cost, ana_result = network_model.ana_train_step(
                        single_mention_array, 1, lr, l2_lambda, dropout_rate)
                else:
                    pos_num += 1
                    ana_cost, ana_result = network_model.ana_train_step(
                        single_mention_array, 0, lr, l2_lambda, dropout_rate)
                for intance, lable in zip(train_list, lable_list):
                    mention_cost, mention_result = network_model.mention_train_step(
                        intance, lable, lr, l2_lambda, dropout_rate)

            done_num += 1
            if done_num == 10:
                break  # only 10 documents per epoch in this phase
        lr = lr * 0.99  # learning-rate decay per epoch

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain_pair.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "POS:NEG", pos_num, neg_num
        print >> sys.stderr, "lr", lr
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
            num += 1
            if num == 10:
                break  # evaluate on only 10 dev documents
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

    # Phase 2: joint pair pretraining over the full training set.
    print >> sys.stderr, "Begin Normal Training"
    for echo in range(30):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        done_num = 0
        pos_num = 0
        neg_num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue  # skip very large documents
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.pre_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda, dropout_rate)[0]
                #cost_this_turn += network_model.pre_top_train_step(single_mention_array,train_list,lable_list,lr,l2_lambda)[0]

                if lable_list[0] == 1:
                    neg_num += 1
                else:
                    pos_num += 1

            done_num += 1
            #if done_num == 10:
            #    break
        lr = lr * 0.99  # learning-rate decay per epoch

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "POS:NEG", pos_num, neg_num
        print >> sys.stderr, "lr", lr
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
            num += 1
            if num == 10:
                break  # evaluate on only 10 dev documents
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

    # NOTE(review): everything below this return is unreachable dead code,
    # apparently disabled deliberately.
    return

    for echo in range(30, 50):
        start_time = timeit.default_timer()
        cost_this_turn = 0.0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.pre_ce_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda, ce_lambda)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    ## test performance after pretraining
    print >> sys.stderr, "Begin test on DEV after pertraining"
    dev_docs_for_test = []
    num = 0
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
    #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = policy_network.generate_policy_test(cases, gold_chain,
                                                     network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "#################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Pre Train done"
Example #9
0
def main():
    """Pretrain the manager network of a manager/worker coreference model,
    evaluate it on dev, and stop (the worker pretraining and the joint
    RL training below the early return are disabled dead code).

    Python 2 script (print statements, file(), cPickle); depends on
    module-level args, word2vec, DataGenerate, pretrain, policy_network,
    network, evaluation, random.
    """

    embedding_dir = args.embedding + args.language

    print >> sys.stderr, "Read Embedding from %s ..." % embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64  # Chinese embeddings use a different size
    w2v = word2vec.Word2Vec(embedding_dir, embedding_dimention)

    #network_model_manager
    # Load the manager network if a saved one exists, otherwise build and
    # save a fresh one.
    if os.path.isfile("./model/network_model_manager." + args.language):
        #read_f = file('./model/network_model_manager.'+args.language, 'rb')
        # NOTE(review): the existence check and the log message name
        # network_model_manager.<lang>, but the model is actually loaded
        # from network_model_pretrain_manager.<lang>.  read_f never closed.
        read_f = file(
            './model/network_model_pretrain_manager.' + args.language, 'rb')
        network_manager = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model_manager." + args.language
    else:
        # Feature dimensions differ per language (defaults are for "cn").
        inpt_dimention = 1738
        single_dimention = 855
        cluster_dimention = 855  # NOTE(review): unused in this branch
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673
            cluster_dimention = 855

        network_manager = network.Manager(inpt_dimention, single_dimention,
                                          1000)
        print >> sys.stderr, "save model network_manager..."
        save_f = file('./model/network_model_manager.' + args.language, 'wb')
        cPickle.dump(network_manager,
                     save_f,
                     protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    #network_model_worker
    # Same load-or-create pattern for the worker network.
    if os.path.isfile("./model/network_model_worker." + args.language):
        read_f = file('./model/network_model_worker.' + args.language, 'rb')  # NOTE(review): never closed
        #read_f = file('./model/network_model_pretrain_worker.'+args.language, 'rb')
        network_worker = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model_worker." + args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        cluster_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673
            cluster_dimention = 855

        network_worker = network.Worker(inpt_dimention, single_dimention,
                                        cluster_dimention, 1000)
        print >> sys.stderr, "save model network_worker..."
        save_f = file('./model/network_model_worker.' + args.language, 'wb')
        cPickle.dump(network_worker, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    #pretrain_manager
    times = 0  # NOTE(review): never used
    best_cost = 99999999  # sentinel: any real epoch cost beats this
    step = 0
    lr = 0.00009
    for echo in range(10):
        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue  # skip very large documents
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain):
                #cost_this_turn += network_manager.pre_train_step(single_mention_array,train_list,lable_list,0.0001)[0]
                cost_this_turn += network_manager.pre_train_step(
                    single_mention_array, train_list, lable_list, lr)[0]
            step += 1
            if step % 128 == 0:
                lr = lr * 0.99  # decay learning rate every 128 documents

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain for Manager", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTraining for Manager Use %.3f seconds" % (
            end_time - start_time)

        # Keep a separate "best" checkpoint for the lowest-cost epoch.
        if cost_this_turn <= best_cost:
            save_f = file(
                './model/network_model_pretrain_manager_best.' + args.language,
                'wb')
            cPickle.dump(network_manager,
                         save_f,
                         protocol=cPickle.HIGHEST_PROTOCOL)
            save_f.close()
            best_cost = cost_this_turn

        # Always save the latest checkpoint too.
        save_f = file(
            './model/network_model_pretrain_manager.' + args.language, 'wb')
        cPickle.dump(network_manager,
                     save_f,
                     protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    ## test performance after pretraining
    print >> sys.stderr, "Begin test on DEV after Manager pertraining"
    dev_docs_for_test = []
    num = 0
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = pretrain.generate_pretrain_test(cases, gold_chain,
                                                 network_manager)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after Manager PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "##################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Manager Pre Train done"

    # NOTE(review): everything below this return is unreachable dead code.
    # It also references 'network_model', which is undefined in this
    # function (would raise NameError if ever re-enabled as-is).
    return

    #pretrain_worker
    times = 0
    best_cost = 99999999
    for echo in range(20):
        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                # NOTE(review): despite the '#pretrain_worker' label this
                # trains network_manager, not network_worker.
                cost_this_turn += network_manager.pre_train_step(
                    single_mention_array, train_list, lable_list, 0.0001)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain4Manager", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTraining4Manager Use %.3f seconds" % (
            end_time - start_time)

        if cost_this_turn <= best_cost:
            save_f = file(
                './model/network_model_pretrain_manager_best.' + args.language,
                'wb')
            cPickle.dump(network_manager,
                         save_f,
                         protocol=cPickle.HIGHEST_PROTOCOL)
            save_f.close()
            best_cost = cost_this_turn

        save_f = file(
            './model/network_model_pretrain_manager.' + args.language, 'wb')
        cPickle.dump(network_manager,
                     save_f,
                     protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    ## test performance after pretraining
    print >> sys.stderr, "Begin test on DEV after Manager pertraining"
    dev_docs_for_test = []
    num = 0
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = policy_network.generate_policy_test(cases, gold_chain,
                                                     network_manager)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after Manager PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "##################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Manager Pre Train done"

    ##train
    train4test = []  # add 5 items for testing the training performance
    add2train = True

    for echo in range(20):
        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0

        #for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):

            # Randomly reserve ~5 training documents for progress checks.
            if add2train:
                if random.randint(1, 200) == 10:
                    #train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    train4test.append((cases, gold_chain))
                    if len(train4test) == 5:
                        add2train = False

            this_reward = 0.0

            #for single, train, action, reward in policy_network.generate_policy_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
            for single, train, action, reward in policy_network.generate_policy_case(
                    cases, gold_chain, network_model):
                #reward_b = 0 if len(reward_baseline) < 1 else float(sum(reward_baseline))/float(len(reward_baseline))
                #norm_reward = numpy.array(reward_batch) - reward_b

                cost_this_turn += network_model.train_step(
                    single, train, action, reward, 0.0001)[0]
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:", cost_this_turn
        print >> sys.stderr, "TRAINING Use %.3f seconds" % (end_time -
                                                            start_time)

        #reward_baseline.append(this_reward)
        #if len(reward_baseline) >= 32:
        #    reward_baselin = reward_baseline[1:]

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()

        for train_cases, train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(
                train_cases, train_doc_gold_chain, network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **" % echo
        print "TRAIN"
        mp, mr, mf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        for dev_cases, dev_doc_gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                dev_cases, dev_doc_gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()

        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        #for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
        for test_cases, test_doc_gold_chain in DataGenerate.case_generater(
                test_docs, "test", w2v):
            ev_doc = policy_network.generate_policy_test(
                test_cases, test_doc_gold_chain, network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp, mr, mf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()

        save_f = file(
            './model/nets/network_model.%s.%d' % (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()
0
def main():

    DIR = args.DIR
    embedding_file = args.embedding_dir

    #network_file = "./model/model.pkl"
    #network_file = "./model/pretrain/network_model_pretrain.20"
    network_file = "./model/pretrain/network_model_pretrain.top.best"
    if os.path.isfile(network_file):
        print >> sys.stderr, "Read model from ./model/model.pkl"
        network_model = torch.load(network_file)
    else:
        embedding_matrix = numpy.load(embedding_file)
        #print len(embedding_matrix)

        "Building torch model"
        network_model = network.Network(pair_feature_dimention,
                                        mention_feature_dimention,
                                        word_embedding_dimention,
                                        span_dimention, 1000, embedding_size,
                                        embedding_dimention,
                                        embedding_matrix).cuda()
        print >> sys.stderr, "save model ..."
        torch.save(network_model, network_file)

    reduced = ""
    if args.reduced == 1:
        reduced = "_reduced"

    print >> sys.stderr, "prepare data for train ..."
    train_docs = DataReader.DataGnerater("train" + reduced)
    #train_docs = DataReader.DataGnerater("dev"+reduced)
    print >> sys.stderr, "prepare data for dev and test ..."
    dev_docs = DataReader.DataGnerater("dev" + reduced)
    #test_docs = DataReader.DataGnerater("test"+reduced)

    l2_lambda = 1e-6
    lr = 0.00002
    dropout_rate = 0.5
    shuffle = True
    times = 0
    best_thres = 0.5

    reinforce = True

    model_save_dir = "./model/pretrain/"

    metrics = performance.performance(dev_docs, network_model)

    p, r, f = metrics["b3"]

    f_b = [f]

    #for echo in range(30,200):
    for echo in range(20):

        start_time = timeit.default_timer()
        print "Pretrain Epoch:", echo

        #if echo == 100:
        #    lr = lr/2.0
        #if echo == 150:
        #    lr = lr/2.0

        #optimizer = optim.RMSprop(filter(lambda p: p.requires_grad, network_model.parameters()), lr=lr, weight_decay=l2_lambda)
        #optimizer = optim.RMSprop(network_model.parameters(), lr=lr, weight_decay=l2_lambda)
        cost = 0.0
        optimizer = optim.RMSprop(network_model.parameters(),
                                  lr=lr,
                                  eps=1e-5,
                                  weight_decay=l2_lambda)

        pair_cost_this_turn = 0.0
        ana_cost_this_turn = 0.0

        pair_nums = 0
        ana_nums = 0

        pos_num = 0
        neg_num = 0
        inside_time = 0.0

        score_softmax = nn.Softmax()

        cluster_info = []
        new_cluster_num = 0
        cluster_info.append(-1)
        action_list = []
        new_cluster_info = []
        tmp_data = []

        #for data in train_docs.rl_case_generater():
        for data in train_docs.rl_case_generater(shuffle=True):
            inside_time += 1

            this_doc = train_docs
            tmp_data.append(data)

            mention_word_index, mention_span, candi_word_index,candi_span,feature_pair,pair_antecedents,pair_anaphors,\
            target,positive,negative,anaphoricity_word_indexs, anaphoricity_spans, anaphoricity_features, anaphoricity_target,rl,candi_ids_return = data

            gold_chain = this_doc.gold_chain[rl["did"]]
            gold_dict = {}
            for chain in gold_chain:
                for item in chain:
                    gold_dict[item] = chain

            mention_index = autograd.Variable(
                torch.from_numpy(mention_word_index).type(
                    torch.cuda.LongTensor))
            mention_span = autograd.Variable(
                torch.from_numpy(mention_span).type(torch.cuda.FloatTensor))
            candi_index = autograd.Variable(
                torch.from_numpy(candi_word_index).type(torch.cuda.LongTensor))
            candi_spans = autograd.Variable(
                torch.from_numpy(candi_span).type(torch.cuda.FloatTensor))
            pair_feature = autograd.Variable(
                torch.from_numpy(feature_pair).type(torch.cuda.FloatTensor))
            anaphors = autograd.Variable(
                torch.from_numpy(pair_anaphors).type(torch.cuda.LongTensor))
            antecedents = autograd.Variable(
                torch.from_numpy(pair_antecedents).type(torch.cuda.LongTensor))

            anaphoricity_index = autograd.Variable(
                torch.from_numpy(anaphoricity_word_indexs).type(
                    torch.cuda.LongTensor))
            anaphoricity_span = autograd.Variable(
                torch.from_numpy(anaphoricity_spans).type(
                    torch.cuda.FloatTensor))
            anaphoricity_feature = autograd.Variable(
                torch.from_numpy(anaphoricity_features).type(
                    torch.cuda.FloatTensor))

            output, pair_score = network_model.forward_all_pair(
                word_embedding_dimention, mention_index, mention_span,
                candi_index, candi_spans, pair_feature, anaphors, antecedents,
                dropout_rate)
            ana_output, ana_score = network_model.forward_anaphoricity(
                word_embedding_dimention, anaphoricity_index,
                anaphoricity_span, anaphoricity_feature, dropout_rate)

            reindex = autograd.Variable(
                torch.from_numpy(rl["reindex"]).type(torch.cuda.LongTensor))

            scores_reindex = torch.transpose(
                torch.cat((pair_score, ana_score), 1), 0, 1)[reindex]
            #scores_reindex = torch.transpose(torch.cat((pair_score,-1-0.3*ana_score),1),0,1)[reindex]

            for s, e in zip(rl["starts"], rl["ends"]):
                #action_prob: scores_reindex[s:e][1]
                score = score_softmax(
                    torch.transpose(scores_reindex[s:e], 0,
                                    1)).data.cpu().numpy()[0]
                this_action = utils.sample_action(score)
                #this_action = ac_list.index(max(score.tolist()))
                action_list.append(this_action)

                if this_action == len(score) - 1:
                    should_cluster = new_cluster_num
                    new_cluster_num += 1
                    new_cluster_info.append(1)
                else:
                    should_cluster = cluster_info[this_action]
                    new_cluster_info.append(0)

                cluster_info.append(should_cluster)

            if rl["end"] == True:
                ev_document = utils.get_evaluation_document(
                    cluster_info, this_doc.gold_chain[rl["did"]],
                    candi_ids_return, new_cluster_num)
                p, r, f = evaluation.evaluate_documents([ev_document],
                                                        evaluation.b_cubed)
                trick_reward = utils.get_reward_trick(cluster_info, gold_dict,
                                                      new_cluster_info,
                                                      action_list,
                                                      candi_ids_return)

                #reward = f + trick_reward
                average_f = float(sum(f_b)) / len(f_b)

                reward = (f - average_f) * 10

                f_b.append(f)
                if len(f_b) > 128:
                    f_b = f_b[1:]

                index = 0
                for data in tmp_data:
                    mention_word_index, mention_span, candi_word_index,candi_span,feature_pair,pair_antecedents,pair_anaphors,\
                    target,positive,negative,anaphoricity_word_indexs, anaphoricity_spans, anaphoricity_features, anaphoricity_target,rl,candi_ids_return = data

                    mention_index = autograd.Variable(
                        torch.from_numpy(mention_word_index).type(
                            torch.cuda.LongTensor))
                    mention_span = autograd.Variable(
                        torch.from_numpy(mention_span).type(
                            torch.cuda.FloatTensor))
                    candi_index = autograd.Variable(
                        torch.from_numpy(candi_word_index).type(
                            torch.cuda.LongTensor))
                    candi_spans = autograd.Variable(
                        torch.from_numpy(candi_span).type(
                            torch.cuda.FloatTensor))
                    pair_feature = autograd.Variable(
                        torch.from_numpy(feature_pair).type(
                            torch.cuda.FloatTensor))
                    anaphors = autograd.Variable(
                        torch.from_numpy(pair_anaphors).type(
                            torch.cuda.LongTensor))
                    antecedents = autograd.Variable(
                        torch.from_numpy(pair_antecedents).type(
                            torch.cuda.LongTensor))

                    anaphoricity_index = autograd.Variable(
                        torch.from_numpy(anaphoricity_word_indexs).type(
                            torch.cuda.LongTensor))
                    anaphoricity_span = autograd.Variable(
                        torch.from_numpy(anaphoricity_spans).type(
                            torch.cuda.FloatTensor))
                    anaphoricity_feature = autograd.Variable(
                        torch.from_numpy(anaphoricity_features).type(
                            torch.cuda.FloatTensor))

                    rl_costs = autograd.Variable(
                        torch.from_numpy(rl["costs"]).type(
                            torch.cuda.FloatTensor))
                    rl_costs = torch.transpose(rl_costs, 0, 1)

                    output, pair_score = network_model.forward_all_pair(
                        word_embedding_dimention, mention_index, mention_span,
                        candi_index, candi_spans, pair_feature, anaphors,
                        antecedents, dropout_rate)
                    ana_output, ana_score = network_model.forward_anaphoricity(
                        word_embedding_dimention, anaphoricity_index,
                        anaphoricity_span, anaphoricity_feature, dropout_rate)

                    reindex = autograd.Variable(
                        torch.from_numpy(rl["reindex"]).type(
                            torch.cuda.LongTensor))

                    optimizer.zero_grad()
                    loss = None
                    scores_reindex = torch.transpose(
                        torch.cat((pair_score, ana_score), 1), 0, 1)[reindex]
                    #scores_reindex = torch.transpose(torch.cat((pair_score,-1-0.3*ana_score),1),0,1)[reindex]

                    for s, e in zip(rl["starts"], rl["ends"]):
                        #action_prob: scores_reindex[s:e][1]
                        this_action = action_list[index]
                        #current_reward = reward + trick_reward[index]
                        current_reward = reward

                        #this_loss = -reward*(torch.transpose(F.log_softmax(torch.transpose(scores_reindex[s:e],0,1)),0,1)[this_action])
                        this_loss = -current_reward * (torch.transpose(
                            F.log_softmax(
                                torch.transpose(scores_reindex[s:e], 0, 1)), 0,
                            1)[this_action])

                        if loss is None:
                            loss = this_loss
                        else:
                            loss += this_loss
                        index += 1
                    #loss /= len(rl["starts"])
                    loss /= len(rl["starts"])
                    #loss = loss/train_docs.scale_factor
                    ## policy graident
                    cost += loss.data[0]
                    loss.backward()
                    optimizer.step()

                new_cluster_num = 0
                cluster_info = []
                cluster_info.append(-1)
                tmp_data = []
                action_list = []
                new_cluster_info = []
            #if inside_time%50 == 0:
            #    performance.performance(dev_docs,network_model)
            #    print
            #    sys.stdout.flush()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print >> sys.stderr, "cost:", cost
        #print >> sys.stderr,"save model ..."
        #torch.save(network_model, model_save_dir+"network_model_pretrain.%d"%echo)

        performance.performance(dev_docs, network_model)

        sys.stdout.flush()
Example #11
0
def main():
    """Train and evaluate the coreference policy network (Python 2, cPickle).

    Stages:
      1. Load or create the pairwise NetWork model and its snapshot on disk.
      2. Supervised pretraining (capped at most_time documents per epoch),
         then snapshot the model and report MUC / B-cubed / CEAF on samples
         of the train and dev splits.
      3. An early ``return`` below makes the subsequent REINFORCE training
         loop unreachable as written.

    Depends on module-level globals: args, word2vec, network, DataGenerate,
    pretrain, policy_network, evaluation, random, cPickle.
    """

    embedding_dir = args.embedding+args.language

    print >> sys.stderr,"Read Embedding from %s ..."%embedding_dir
    # Chinese embeddings are 64-dimensional, English (default) 50.
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir,embedding_dimention)

    #network_model
    if os.path.isfile("./model/network_model."+args.language):
        read_f = file('./model/network_model.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr,"Read model from ./model/network_model."+args.language
    else:
        # Feature dimensions differ per language.
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention,single_dimention,1000)
        print >> sys.stderr,"save model ..."
        save_f = file('./model/network_model.'+args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    # Caps on how many documents are consumed per pass.
    most_time = 100
    most_time_test = 50

    #pretrain
    for echo in range(10):
        start_time = timeit.default_timer()
        print "Pretrain ECHO:",echo
        cost_this_turn = 0.0
        num = most_time
        #print >> sys.stderr, network_model.get_weight_sum()
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
            num -= 1
            if num <= 0:
                break
            for single_mention_array,train_list,lable_list in pretrain.generate_pretrain_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
                #print single_mention_array
                cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,lable_list,0.0003)[0]

        # Smoke-test inference on a single training document each epoch
        # (the immediate break keeps it to one document).
        for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
            ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
            break

        print network_model.get_weight_sum()
        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain",echo,"Total cost:",cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds"%(end_time-start_time)

    # Snapshot the pretrained model.
    save_f = file('./model/network_model_pretrain.'+args.language, 'wb')
    cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
    save_f.close()
    print >> sys.stderr,"Begin test on DEV after pertraining"

    ## test performance on dev after pretraining
    # NOTE(review): despite the dev_* variable names, this pass iterates the
    # TRAIN split (consistent with the "Performance on TRAIN" banner below).
    dev_docs_for_test = []
    num = most_time_test
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
    for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
        num -= 1
        if num <= 0:
            break
        ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on TRAIN after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "##################################################"
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"

    ## test performance on dev after pretraining
    dev_docs_for_test = []
    num = most_time_test
    for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        num -= 1
        if num <= 0:
            break
        ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "##################################################"
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"
    # NOTE(review): early return — everything below (the REINFORCE training
    # loop and its evaluations) is unreachable as written; presumably left
    # in place while pretraining was being debugged.
    return



    ##train
    train4test = [] # add 5 items for testing the training performance
    add2train = True

    for echo in range(20):
        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0

        trick_num = 0
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):

            #trick_num += 1
            #if trick_num < 80:
            #    continue

            # Reservoir-style sampling of ~5 training documents for the
            # "TRAIN" evaluation pass below.
            if add2train:
                if random.randint(1,200) == 100:
                    train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    if len(train4test) == 5:
                        add2train = False

            this_reward = 0.0

            #for train_batch, mask_batch, action_batch, reward_batch in policy_network.generate_policy_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
            for single, train, action, reward in policy_network.generate_policy_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
                #this_reward = reward_batch

                #reward_b = 0 if len(reward_baseline) < 1 else float(sum(reward_baseline))/float(len(reward_baseline))
                #norm_reward = numpy.array(reward_batch) - reward_b

                #cost_this_turn += network_model.train_step(train_batch,mask_batch,action_batch,norm_reward,0.0001)[0]
                cost_this_turn += network_model.train_step(single,train,action,reward,0.0001)[0]
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:",cost_this_turn
        print >> sys.stderr, "TRAINING Use %.3f seconds"%(end_time-start_time)

        #reward_baseline.append(this_reward)
        #if len(reward_baseline) >= 32:
        #    reward_baselin = reward_baseline[1:]

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **"%echo
        print "TRAIN"
        mp,mr,mf = evaluation.evaluate_documents(train_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(train_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(train_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
            ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()

        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
            ev_doc = policy_network.generate_policy_test(test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain,network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp,mr,mf = evaluation.evaluate_documents(test_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(test_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(test_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()

        # Per-epoch model snapshot.
        save_f = file('./model/nets/network_model.%s.%d'%(args.language,echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()
Example #12
0
def main():

    embedding_dir = args.embedding + args.language

    print >> sys.stderr, "Read Embedding from %s ..." % embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir + ".filtered", embedding_dimention)

    #network_model
    if os.path.isfile("./model/network_model_index." + args.language):
        read_f = file('./model/network_model_index.' + args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model_index." + args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention, single_dimention, 1000,
                                        embedding_dir + ".filtered",
                                        embedding_dimention)
        print >> sys.stderr, "save model ..."
        save_f = file('./model/network_model_index.' + args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    #pretrain
    times = 0
    for echo in range(20):
        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, single_index, train_list, train_index, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                print network_model.fff(train_list, train_index)
                cost_this_turn += network_model.pre_train_step(
                    single_mention_array, single_index, train_list,
                    train_index, lable_list, 0.0001)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)

        if echo % 4 == 0:
            save_f = file(
                './model/network_model_pretrain_index.' + args.language, 'wb')
            cPickle.dump(network_model,
                         save_f,
                         protocol=cPickle.HIGHEST_PROTOCOL)
            save_f.close()

    save_f = file('./model/network_model_pretrain_index.' + args.language,
                  'wb')
    cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
    save_f.close()

    print >> sys.stderr, "Begin test on DEV after pertraining"

    ## test performance after pretraining
    dev_docs_for_test = []
    num = 0
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = policy_network.generate_policy_test(cases, gold_chain,
                                                     network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "##################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Pre Train done"

    ##train
    train4test = []  # add 5 items for testing the training performance
    add2train = True

    for echo in range(20):
        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0

        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):

            if add2train:
                if random.randint(1, 200) == 10:
                    #train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    train4test.append((cases, gold_chain))
                    if len(train4test) == 5:
                        add2train = False

            this_reward = 0.0

            for single, sindex, train, tindex, action, reward in policy_network.generate_policy_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.train_step(
                    single, sindex, train, tindex, action, reward, 0.0001)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:", cost_this_turn
        print >> sys.stderr, "TRAINING Use %.3f seconds" % (end_time -
                                                            start_time)

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()

        for train_cases, train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(
                train_cases, train_doc_gold_chain, network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **" % echo
        print "TRAIN"
        mp, mr, mf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        for dev_cases, dev_doc_gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                dev_cases, dev_doc_gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()

        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        #for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
        for test_cases, test_doc_gold_chain in DataGenerate.case_generater(
                test_docs, "test", w2v):
            ev_doc = policy_network.generate_policy_test(
                test_cases, test_doc_gold_chain, network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp, mr, mf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()
        '''
Example #13
0
def main():

    embedding_dir = args.embedding+args.language

    print >> sys.stderr,"Read Embedding from %s ..."%embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir,embedding_dimention)

    #network_model
    net_dir = "./model/pretrain_batch/network_model_pretrain.cn.9"
    if os.path.isfile("./model/network_model_batch."+args.language):
        #read_f = file('./model/network_model_batch.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.cn.best', 'rb')
        read_f = file(net_dir, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr,"Read model from ./model/network_model_batch."+args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention,single_dimention,1000)
        print >> sys.stderr,"save model ..."
        save_f = file('./model/network_model_batch.'+args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")


    #pretrain
    l2_lambda = 0.0000003
    lr = 0.00002
    ce_lambda = 0.005

    times = 0
    for echo in range(0):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:",echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        for cases,gold_chain in DataGenerate.case_generater(train_docs,"train",w2v):
            if len(cases) >= 700:
                continue
            for train_list,single_mention_array,mask_list,lable_list in pretrain.generate_pretrain_case_batch(cases,gold_chain,network_model):
                cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,mask_list,lable_list,lr,l2_lambda)[0]
        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain",echo,"Total cost:",cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds"%(end_time-start_time)

        save_f = file('./model/pretrain_batch/network_model_pretrain_noNorm.%s.%d'%(args.language,echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()


    for echo in range(0):
        start_time = timeit.default_timer()
        cost_this_turn = 0.0
        for cases,gold_chain in DataGenerate.case_generater(train_docs,"train",w2v):
            if len(cases) >= 700:
                continue
            for train_list,single_mention_array,mask_list,lable_list in pretrain.generate_pretrain_case_batch(cases,gold_chain,network_model):
                cost_this_turn += network_model.pre_ce_train_step(single_mention_array,train_list,mask_list,lable_list,lr,l2_lambda,ce_lambda)[0]

        save_f = file('./model/pretrain_batch/network_model_pretrain.%s.%d'%(args.language,echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain",echo,"Total cost:",cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds"%(end_time-start_time)

    print >> sys.stderr,"Begin test on DEV after pertraining"
    
    ## test performance after pretraining
    dev_docs_for_test = []
    num = 0
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
    for cases,gold_chain in DataGenerate.case_generater(dev_docs,"dev",w2v):
        ev_doc = policy_network.generate_policy_test(cases,gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "#################################################" 
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"
    ##train
    train4test = [] # add 5 items for testing the training performance
    add2train = True

    for echo in range(10):
        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0
        average_reward = 0.0
        done_case_num = 0

        l2_lambda = 0.000003
        lr = 0.000002
        ce_lambda = 0.0

        for cases,gold_chain in DataGenerate.case_generater(train_docs,"train",w2v):
            if len(cases) >= 700:
                continue

            if add2train:
                if random.randint(1,200) == 10:
                #if not random.randint(1,200) == 10:
                    #train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    train4test.append((cases,gold_chain))
                    if len(train4test) == 20:
                        add2train = False

            this_reward = 0.0
            reward_b = 0 if len(reward_baseline) < 1 else float(sum(reward_baseline))/float(len(reward_baseline))

            for train, single, mask, action, reward in policy_network.generate_policy_case(cases,gold_chain,network_model):

                if len(train) <= 1:
                    continue
            #for single, train, action, reward , acp in policy_network.generate_policy_case_trick(cases,gold_chain,network_model):

                norm_reward = reward - reward_b

                this_reward = reward
                
                this_cost = network_model.train_step(single,train,mask,action,reward*100,lr,l2_lambda,ce_lambda)[0]
                #print this_cost,acp,reward
                cost_this_turn += this_cost

            average_reward += this_reward
            done_case_num += 1

            #if done_case_num >= 1:
            #    break
        print network_model.get_weight_sum()
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:",cost_this_turn
        print >> sys.stderr, "Average Reward:",average_reward/float(done_case_num)
        print >> sys.stderr, "TRAINING Use %.3f seconds"%(end_time-start_time)

        reward_baseline.append(this_reward)
        if len(reward_baseline) >= 64:
            reward_baselin = reward_baseline[1:]

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()

        for train_cases,train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(train_cases,train_doc_gold_chain,network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **"%echo
        print "TRAIN"
        mp,mr,mf = evaluation.evaluate_documents(train_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(train_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(train_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        for dev_cases,dev_doc_gold_chain in DataGenerate.case_generater(dev_docs,"dev",w2v):
            ev_doc = policy_network.generate_policy_test(dev_cases,dev_doc_gold_chain,network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print 

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()
   
        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        #for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
        for test_cases,test_doc_gold_chain in DataGenerate.case_generater(test_docs,"test",w2v):
            ev_doc = policy_network.generate_policy_test(test_cases,test_doc_gold_chain,network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp,mr,mf = evaluation.evaluate_documents(test_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(test_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(test_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print 

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()

        save_f = file('./model/nets/network_model_batch.%s.%d'%(args.language,echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()