Example #1
def get_network():
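    # Assemble a minimal classifier: a 10-unit dense layer over 784-dimensional
    # input, a softmax output, categorical cross-entropy loss and the Adagrad optimizer.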
    n = network.NetWork('network')
    n.add(neural.Dense(10, input_dim=784))
    n.add(neural.Softmax())
    n.loss(loss.CategoryCrossEntropy())
    n.optimizer(optimizer.Adagrad())
    return n
Example #2
def __init__(self, dimenssion, xy_min, xy_max, grid_value_list, size=100):
    self.grid = []
    self.locations = []
    self.grid_dimenssion = []
    self.Network = network.NetWork()
    self.set_dimenssion(dimenssion)
    self.set_grid()
    self.set_locations(xy_min, xy_max, grid_value_list, size)
Example #3
def __init__(self, dimenssion, xy_args=0, size=100):
    self.grid = []
    self.locations = []
    self.grid_dimenssion = []
    self.Network = network.NetWork()
    self.set_dimenssion(dimenssion)
    self.set_grid()
    self.set_locationList(size)
Example #4
def train_the_model():
    tf.reset_default_graph()
    nw = network.NetWork()
    init = tf.global_variables_initializer()  # replaces the deprecated initialize_all_variables()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.75)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        writer = tf.summary.FileWriter(os.path.join(config.tensorboard_path,"basic"))
        writer1 = tf.summary.FileWriter(os.path.join(config.tensorboard_path, "inference"))
        writer2 = tf.summary.FileWriter(os.path.join(config.tensorboard_path, "inference_random"))
        writer.add_graph(sess.graph)
        saver = tf.train.Saver(tf.global_variables())
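
        # Main training loop: one reconstruction step per iteration, with periodic
        # TensorBoard summaries (training, inference, shuffled-target inference)
        # and periodic checkpointing.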
        for i in range(config.epocs):

            batch_dict = fetch_train_batch()
            batch = batch_dict['batch_holder']
            labels = batch_dict['labels']
            feed_dict = {'inputframe:0': batch, 'phase:0' : [1], 'labels:0': labels}
            #print(np.sum(sess.run([nw.broadcast], feed_dict=feed_dict)))
            _, loss = sess.run([nw.train, nw.r_loss], feed_dict=feed_dict)
            print('run #', i, '- Reconstruction Loss:', loss)

            if i % config.save_for_tensorboard == 0:
                batch_dict = fetch_train_batch(inference = True)
                batch = batch_dict['batch_holder']
                labels = batch_dict['labels']
                feed_dict1 = {'inputframe:0': batch, 'phase:0': [0], 'labels:0': labels}

                merge_inf = sess.run(nw.merged, feed_dict=feed_dict1)
                writer1.add_summary(merge_inf,i)

            if i % config.save_for_tensorboard == 0:
                batch_dict = fetch_train_batch(shuffle_target_frame=True, inference=True)
                batch = batch_dict['batch_holder']
                labels = batch_dict['labels']
                feed_dict2 = {'inputframe:0': batch, 'phase:0': [0], 'labels:0': labels}

                merge_inf_r = sess.run(nw.merged, feed_dict=feed_dict2)
                writer2.add_summary(merge_inf_r, i)


            if i % config.save_for_tensorboard == 0:
                merged = sess.run(nw.merged, feed_dict=feed_dict)
                writer.add_summary(merged, i)

            if i % config.save_at_N_epocs == 0:
                saver.save(sess, os.path.join(config.save_path + "/custom_trained_model","VAE_CUSTOM.ckpt"))
Example #5
class Environment():
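    # Skeleton environment: holds a grid, locations, grid dimensions and a
    # network.NetWork instance; most of the setters below are placeholders.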
    Network = network.NetWork()
    grid = []
    locations = []
    grid_dimenssion = []

    def __init__(self):
        self.grid = []
        self.locations = []
        self.grid_dimenssion = []
        self.Network = network.NetWork()

    def set_environment(self, environment):
        self.Network = environment.Network
        self.grid = environment.grid
        self.locations = environment.locations
        self.grid_dimenssion = environment.grid_dimenssion

    #set the parameters of the class
    def set_dimenssion(self, dimenssion):
        if (len(dimenssion) == 2):
            self.grid_dimenssion = dimenssion
        elif (len(dimenssion) == 1):
            self.grid_dimenssion = [dimenssion[0], dimenssion[0]]
        else:
            pass

    def set_locations(self, x_arg, y_arg, size):
        pass

    def set_grid(self):
        if (len(self.grid_dimenssion) == 2):
            pass

    def set_network(self, path):
        pass
Example #6
def __init__(self):
    self.grid = []
    self.locations = []
    self.grid_dimenssion = []
    self.Network = network.NetWork()
Example #7
def main():

    embedding_dir = args.embedding + args.language

    print >> sys.stderr, "Read Embedding from %s ..." % embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir, embedding_dimention)

    #network_model
    #net_dir = "./model/pretrain/network_model_pretrain.cn.19"
    #net_dir = "./model/pretrain_manu_dropout/network_model_pretrain.cn.10"
    if os.path.isfile("./model/network_model." + args.language):
        read_f = file('./model/network_model.' + args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.cn.best', 'rb')
        #read_f = file(net_dir, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model." + args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention, single_dimention, 1000)
        print >> sys.stderr, "save model ..."
        save_f = file('./model/network_model.' + args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    #pretrain
    l2_lambda = 0.0000001
    lr = 0.03
    ce_lambda = 0.0001
    dropout_rate = 0.2

    print "Weight Sum", network_model.get_weight_sum()

    times = 0
    #for echo in range(11,40):
    for echo in range(10):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        done_num = 0
        pos_num = 0
        neg_num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):

                #cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,lable_list,lr,l2_lambda,dropout_rate)[0]

                if lable_list[0] == 1:
                    neg_num += 1
                    ana_cost, ana_result = network_model.ana_train_step(
                        single_mention_array, 1, lr, l2_lambda, dropout_rate)
                else:
                    pos_num += 1
                    ana_cost, ana_result = network_model.ana_train_step(
                        single_mention_array, 0, lr, l2_lambda, dropout_rate)
                for intance, lable in zip(train_list, lable_list):
                    mention_cost, mention_result = network_model.mention_train_step(
                        intance, lable, lr, l2_lambda, dropout_rate)

            done_num += 1
            if done_num == 10:
                break
        lr = lr * 0.99

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain_pair.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "POS:NEG", pos_num, neg_num
        print >> sys.stderr, "lr", lr
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
            num += 1
            if num == 10:
                break
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

    print >> sys.stderr, "Begin Normal Training"
    for echo in range(30):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        done_num = 0
        pos_num = 0
        neg_num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.pre_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda, dropout_rate)[0]
                #cost_this_turn += network_model.pre_top_train_step(single_mention_array,train_list,lable_list,lr,l2_lambda)[0]

                if lable_list[0] == 1:
                    neg_num += 1
                else:
                    pos_num += 1

            done_num += 1
            #if done_num == 10:
            #    break
        lr = lr * 0.99

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "POS:NEG", pos_num, neg_num
        print >> sys.stderr, "lr", lr
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
            num += 1
            if num == 10:
                break
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

    return
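    # NOTE: everything below this return is unreachable; it is kept only for reference.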

    for echo in range(30, 50):
        start_time = timeit.default_timer()
        cost_this_turn = 0.0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.pre_ce_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda, ce_lambda)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)
        print "Weight Sum", network_model.get_weight_sum()

        ## test performance after pretraining
        dev_docs_for_test = []
        num = 0
        for cases, gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                cases, gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
        print "Performance on DEV after PreTRAINING"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print "#################################################"
        sys.stdout.flush()

        save_f = file(
            './model/pretrain_manu_new/network_model_pretrain.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    ## test performance after pretraining
    print >> sys.stderr, "Begin test on DEV after pertraining"
    dev_docs_for_test = []
    num = 0
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
    #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = policy_network.generate_policy_test(cases, gold_chain,
                                                     network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "#################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Pre Train done"
Example #8
def train_network():
    tf.reset_default_graph()
    net = nn.NetWork(config.data_size,
                     param_state_size=config.param_state_size)
    init = tf.global_variables_initializer()

    SCF = 1  #fetch_data.get_scaling_factor(config.data_path)

    with tf.Session() as sess:
        sess.run(init)

        sub_dir = os.path.join(
            config.tensor_board,
            'TS' + util.get_name_ext() + time.strftime("%Y%m%d-%H%M%S"))
        sub_dir_test = os.path.join(
            config.tensor_board,
            'TS_Test' + util.get_name_ext() + time.strftime("%Y%m%d-%H%M%S"))
        os.mkdir(sub_dir)
        writer = tf.summary.FileWriter(sub_dir)
        writer_test = tf.summary.FileWriter(sub_dir_test)
        writer.add_graph(sess.graph)

        saver = tf.train.Saver(tf.global_variables())
        graph_handle = util.find_graph(config.path_e)

        store_integrator_loss = -1

        if graph_handle:
            graph_handle.restore(sess,
                                 tf.train.latest_checkpoint(config.path_e))

        for i in range(config.training_runs):
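            # Each iteration: fetch a batch of volumes, run one optimization
            # step, and write a TensorBoard summary at the logging interval.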

            inputs = fetch_data.get_volume(config.data_path,
                                           batch_size=config.batch_size,
                                           scaling_factor=SCF)
            inputs['Train/step:0'] = i

            if config.f_tensorboard != 0 and i % config.f_tensorboard == 0 and os.path.isdir(
                    config.tensor_board):
                loss, lr, merged, _ = sess.run(
                    [net.loss, net.lr, net.merged, net.train], inputs)
                writer.add_summary(merged, i)
            else:
                loss, lr, _ = sess.run([net.loss, net.lr, net.train], inputs)

            if os.path.isdir(config.path_e) and i % config.save_freq == 0:
                saver.save(
                    sess,
                    os.path.join(config.path_e,
                                 util.get_name_ext() + "_trained_model.ckpt"))
                print('Saving graph')

            if i % 500 == 0 and os.path.isdir(config.tensor_board):
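                # Every 500 steps: evaluate a benchmark volume (falling back to
                # the training data), save the decoded field, and record full
                # execution metadata for TensorBoard.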
                inputs_ci = fetch_data.get_volume(config.benchmark_data,
                                                  1,
                                                  scaling_factor=SCF)
                if inputs_ci:
                    util.create_gif_encoder(config.benchmark_data,
                                            sess,
                                            net,
                                            i=i,
                                            save_frequency=5000,
                                            SCF=SCF)
                if not inputs_ci:
                    inputs_ci = fetch_data.get_volume(config.data_path,
                                                      1,
                                                      scaling_factor=SCF)
                    util.create_gif_encoder(config.data_path,
                                            sess,
                                            net,
                                            i=i,
                                            save_frequency=5000,
                                            SCF=SCF)

                inputs_ci['Train/step:0'] = i

                loss, merged = sess.run([net.loss, net.merged], inputs_ci)
                writer_test.add_summary(merged, i)
                test_field = sess.run(net.y, inputs_ci)
                np.save(
                    os.path.join(
                        config.test_field_path, 'train_field' +
                        util.get_name_ext() + time.strftime("%Y%m%d-%H%M")),
                    test_field)

                # Record execution stats
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _ = sess.run([net.train],
                             feed_dict=inputs,
                             options=run_options,
                             run_metadata=run_metadata)
                writer.add_run_metadata(run_metadata, 'step%d' % i)

            if not i % 10:
                print('Training Run', i, 'Learning Rate', lr,
                      '//  Encoder Loss:', loss, '//  Integrator Loss',
                      store_integrator_loss)
Example #9
def main():

    embedding_dir = args.embedding+args.language

    print >> sys.stderr,"Read Embedding from %s ..."%embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir,embedding_dimention)

    #network_model
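    # Load the cached model if present, otherwise build and pickle a new one.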
    if os.path.isfile("./model/network_model."+args.language):
        read_f = file('./model/network_model.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr,"Read model from ./model/network_model."+args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention,single_dimention,1000)
        print >> sys.stderr,"save model ..."
        save_f = file('./model/network_model.'+args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    most_time = 100
    most_time_test = 50
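    # Caps on how many documents are drawn from the train/dev generators below.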

    #pretrain
    for echo in range(10):
        start_time = timeit.default_timer()
        print "Pretrain ECHO:",echo
        cost_this_turn = 0.0
        num = most_time
        #print >> sys.stderr, network_model.get_weight_sum()
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
            num -= 1
            if num <= 0:
                break
            for single_mention_array,train_list,lable_list in pretrain.generate_pretrain_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
                #print single_mention_array
                cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,lable_list,0.0003)[0]

        for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
            ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
            break
 
        print network_model.get_weight_sum()
        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain",echo,"Total cost:",cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds"%(end_time-start_time)

    save_f = file('./model/network_model_pretrain.'+args.language, 'wb')
    cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
    save_f.close()
    print >> sys.stderr,"Begin test on DEV after pertraining"
    
    ## test performance on dev after pretraining
    dev_docs_for_test = []
    num = most_time_test
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
    for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
        num -= 1
        if num <= 0:
            break
        ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on TRAIN after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "##################################################" 
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"

    ## test performance on dev after pretraining
    dev_docs_for_test = []
    num = most_time_test
    for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        num -= 1
        if num <= 0:
            break
        ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
    bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
    cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
    print "##################################################" 
    sys.stdout.flush()
    print >> sys.stderr,"Pre Train done"
    return
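    # NOTE: the training code below this return is unreachable; it is kept only for reference.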



    ##train
    train4test = [] # add 5 items for testing the training performance
    add2train = True

    for echo in range(20):
        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0

        trick_num = 0
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
            
            #trick_num += 1
            #if trick_num < 80:
            #    continue
        
            if add2train:
                if random.randint(1,200) == 100:
                    train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    if len(train4test) == 5:
                        add2train = False

            this_reward = 0.0

            #for train_batch, mask_batch, action_batch, reward_batch in policy_network.generate_policy_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
            for single, train, action, reward in policy_network.generate_policy_case(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model):
                #this_reward = reward_batch

                #reward_b = 0 if len(reward_baseline) < 1 else float(sum(reward_baseline))/float(len(reward_baseline))
                #norm_reward = numpy.array(reward_batch) - reward_b

                #cost_this_turn += network_model.train_step(train_batch,mask_batch,action_batch,norm_reward,0.0001)[0]
                cost_this_turn += network_model.train_step(single,train,action,reward,0.0001)[0]
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:",cost_this_turn
        print >> sys.stderr, "TRAINING Use %.3f seconds"%(end_time-start_time)
        
        #reward_baseline.append(this_reward)
        #if len(reward_baseline) >= 32:
        #    reward_baselin = reward_baseline[1:]

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()
        for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain,network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **"%echo
        print "TRAIN"
        mp,mr,mf = evaluation.evaluate_documents(train_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(train_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(train_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
            ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp,mr,mf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(dev_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print 

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()
    
        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
            ev_doc = policy_network.generate_policy_test(test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain,network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp,mr,mf = evaluation.evaluate_documents(test_docs_for_test,evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f"%(mr,mp,mf)
        bp,br,bf = evaluation.evaluate_documents(test_docs_for_test,evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f"%(br,bp,bf)
        cp,cr,cf = evaluation.evaluate_documents(test_docs_for_test,evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f"%(cr,cp,cf)
        print 

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds"%(end_time-start_time)
        sys.stdout.flush()

        save_f = file('./model/nets/network_model.%s.%d'%(args.language,echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()
Example #10
def main():

    embedding_dir = args.embedding + args.language

    print >> sys.stderr, "Read Embedding from %s ..." % embedding_dir
    embedding_dimention = 50
    if args.language == "cn":
        embedding_dimention = 64
    w2v = word2vec.Word2Vec(embedding_dir, embedding_dimention)

    #network_model
    #net_dir = "./model/pretrain/network_model_pretrain.cn.19"
    net_dir = "./model/nets/network_model.cn.2"
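    # NOTE: the existence check below looks at ./model/network_model.<language>,
    # but the file actually loaded when it passes is net_dir.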
    if os.path.isfile("./model/network_model." + args.language):
        #read_f = file('./model/network_model.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.'+args.language, 'rb')
        #read_f = file('./model/network_model_pretrain.cn.best', 'rb')
        read_f = file(net_dir, 'rb')
        network_model = cPickle.load(read_f)
        print >> sys.stderr, "Read model from ./model/network_model." + args.language
    else:
        inpt_dimention = 1738
        single_dimention = 855
        if args.language == "en":
            inpt_dimention = 1374
            single_dimention = 673

        network_model = network.NetWork(inpt_dimention, single_dimention, 1000)
        print >> sys.stderr, "save model ..."
        save_f = file('./model/network_model.' + args.language, 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

    train_docs = DataGenerate.doc_data_generater("train")
    dev_docs = DataGenerate.doc_data_generater("dev")
    test_docs = DataGenerate.doc_data_generater("test")

    #pretrain
    l2_lambda = 0.0000003
    lr = 0.00002
    ce_lambda = 0.005

    times = 0
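    # Both pretraining loops below use range(0), so they are skipped entirely.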
    for echo in range(0):

        start_time = timeit.default_timer()
        print "Pretrain ECHO:", echo
        cost_this_turn = 0.0
        #print >> sys.stderr, network_model.get_weight_sum()
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                #cost_this_turn += network_model.pre_train_step(single_mention_array,train_list,lable_list,0.0001)[0]
                cost_this_turn += network_model.pre_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda)[0]
                #cost_this_turn += network_model.pre_top_train_step(single_mention_array,train_list,lable_list,lr,l2_lambda)[0]

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)

    for echo in range(0):
        start_time = timeit.default_timer()
        cost_this_turn = 0.0
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):
            if len(cases) >= 700:
                continue
            for single_mention_array, train_list, lable_list in pretrain.generate_pretrain_case(
                    cases, gold_chain, network_model):
                cost_this_turn += network_model.pre_ce_train_step(
                    single_mention_array, train_list, lable_list, lr,
                    l2_lambda, ce_lambda)[0]

        save_f = file(
            './model/pretrain/network_model_pretrain.%s.%d' %
            (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()

        end_time = timeit.default_timer()
        print >> sys.stderr, "PreTrain", echo, "Total cost:", cost_this_turn
        print >> sys.stderr, "PreTRAINING Use %.3f seconds" % (end_time -
                                                               start_time)

    print >> sys.stderr, "Begin test on DEV after pertraining"

    ## test performance after pretraining
    dev_docs_for_test = []
    num = 0
    #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
    #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
    for cases, gold_chain in DataGenerate.case_generater(dev_docs, "dev", w2v):
        ev_doc = policy_network.generate_policy_test(cases, gold_chain,
                                                     network_model)
        dev_docs_for_test.append(ev_doc)
    print "Performance on DEV after PreTRAINING"
    mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.muc)
    print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
    bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.b_cubed)
    print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
    cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                               evaluation.ceafe)
    print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
    print "#################################################"
    sys.stdout.flush()
    print >> sys.stderr, "Pre Train done"

    ##train
    train4test = []  # add 5 items for testing the training performance
    add2train = True

    #lr = 0.000002
    lr = 0.000009
    ce_lmbda = 0.0000001
    l2_lambda = 0.000001

    for echo in range(30):

        if (echo + 1) % 10 == 0:
            lr = lr * 0.6

        start_time = timeit.default_timer()
        reward_baseline = []
        cost_this_turn = 0.0
        average_reward = 0.0
        done_case_num = 0

        #for train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain in DataGenerate.array_generater(train_docs,"train",w2v):
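        # Policy training: policy_network.generate_policy_case yields (single,
        # train, action, reward) tuples, each driving one network_model.train_step update.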
        for cases, gold_chain in DataGenerate.case_generater(
                train_docs, "train", w2v):

            if add2train:
                if random.randint(1, 200) == 10:
                    #if not random.randint(1,200) == 10:
                    #train4test.append((train_doc_mention_array,train_doc_pair_array,train_doc_gold_chain))
                    train4test.append((cases, gold_chain))
                    if len(train4test) == 50:
                        add2train = False

            this_reward = 0.0
            reward_b = 0 if len(reward_baseline) < 1 else float(
                sum(reward_baseline)) / float(len(reward_baseline))

            for single, train, action, reward in policy_network.generate_policy_case(
                    cases, gold_chain, network_model):
                #for single, train, action, reward , acp in policy_network.generate_policy_case_trick(cases,gold_chain,network_model):

                norm_reward = reward - reward_b

                this_reward = reward

                #cost_this_turn += network_model.train_step(single,train,action,reward,0.00001)[0]
                #cost_this_turn += network_model.train_step(single,train,action,norm_reward,0.000003)[0]
                this_cost = network_model.train_step(single, train, action,
                                                     reward, lr, l2_lambda,
                                                     ce_lmbda)[0]
                #print this_cost,acp,reward
                cost_this_turn += this_cost

            average_reward += this_reward
            done_case_num += 1

            #if done_case_num >= 1:
            #    break

        print network_model.get_weight_sum()
        end_time = timeit.default_timer()
        print >> sys.stderr, "Total cost:", cost_this_turn
        print >> sys.stderr, "Average Reward:", average_reward / float(
            done_case_num)
        print >> sys.stderr, "TRAINING Use %.3f seconds" % (end_time -
                                                            start_time)

        reward_baseline.append(this_reward)
        if len(reward_baseline) >= 64:
            reward_baseline = reward_baseline[1:]

        ## test training performance
        train_docs_for_test = []
        start_time = timeit.default_timer()

        for train_cases, train_doc_gold_chain in train4test:
            ev_doc = policy_network.generate_policy_test(
                train_cases, train_doc_gold_chain, network_model)
            train_docs_for_test.append(ev_doc)
        print "** Echo: %d **" % echo
        print "TRAIN"
        mp, mr, mf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(train_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        ## dev
        dev_docs_for_test = []
        start_time = timeit.default_timer()
        #for dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain in DataGenerate.array_generater(dev_docs,"dev",w2v):
        #ev_doc = policy_network.generate_policy_test(dev_doc_mention_array,dev_doc_pair_array,dev_doc_gold_chain,network_model)
        for dev_cases, dev_doc_gold_chain in DataGenerate.case_generater(
                dev_docs, "dev", w2v):
            ev_doc = policy_network.generate_policy_test(
                dev_cases, dev_doc_gold_chain, network_model)
            dev_docs_for_test.append(ev_doc)
        print "DEV"
        mp, mr, mf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(dev_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "DEV Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()

        ## test
        test_docs_for_test = []
        start_time = timeit.default_timer()
        #for test_doc_mention_array,test_doc_pair_array,test_doc_gold_chain in DataGenerate.array_generater(test_docs,"test",w2v):
        for test_cases, test_doc_gold_chain in DataGenerate.case_generater(
                test_docs, "test", w2v):
            ev_doc = policy_network.generate_policy_test(
                test_cases, test_doc_gold_chain, network_model)
            test_docs_for_test.append(ev_doc)
        print "TEST"
        mp, mr, mf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.muc)
        print "MUC: recall: %f precision: %f  f1: %f" % (mr, mp, mf)
        bp, br, bf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.b_cubed)
        print "BCUBED: recall: %f precision: %f  f1: %f" % (br, bp, bf)
        cp, cr, cf = evaluation.evaluate_documents(test_docs_for_test,
                                                   evaluation.ceafe)
        print "CEAF: recall: %f precision: %f  f1: %f" % (cr, cp, cf)
        print

        end_time = timeit.default_timer()
        print >> sys.stderr, "TEST Use %.3f seconds" % (end_time - start_time)
        sys.stdout.flush()

        save_f = file(
            './model/nets/network_model.%s.%d' % (args.language, echo), 'wb')
        cPickle.dump(network_model, save_f, protocol=cPickle.HIGHEST_PROTOCOL)
        save_f.close()