Example #1
0
def run_evaluate():
    train_list = []
    test_list = []
    config = Config()
    train_list = load_img_id_list(config.train_file_list)
    test_list = load_img_id_list(config.test_file_list)

    #Directory to save model Info
    config.save_path = config.save_path + '_' + args.model_name
    restore_id = args.restore_id
    config.knowledge = args.knowledge
    assert (os.path.isdir(config.save_path))

    #Log File
    log_file = config.log_file + '_' + args.model_name + '.log'
    assert (restore_id > 0)

    #Initialize the paths and parameters for the current dataset
    cur_dataset = dataprovider(train_list,
                               test_list,
                               config.img_feat_dir,
                               config.global_feat_dir,
                               config.sen_dir,
                               config.vocab_size,
                               knowledge=config.knowledge,
                               phrase_len=config.phrase_len,
                               batch_size=config.batch_size)
    is_train = False
    #Initialize ground model train instance
    model = ground_model(is_train, config)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)

    with tf.Graph().as_default():
        #Build the model
        total_loss, train_op, att_logits, dec_logits, w_loss, l_loss, v_loss = model.build_model(
        )
        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # Run the Op to initialize the variables.
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver(max_to_keep=200)
        feed_dict = update_feed_dict(cur_dataset, model, is_train=is_train)

        # initialize_from_pretrain(sess, config, args.pretrain_id)
        saver.restore(
            sess, './model/%s/model_%d.ckpt' % (config.save_path, restore_id))

        print "-----------------------------------------------"
        eval_accu = run_eval(sess, cur_dataset, model, att_logits, feed_dict)
        print "-----------------------------------------------"
Example #2
0
def run_evaluate():
    train_list = []
    test_list = []
    config = Config()
    train_list = load_img_id_list(config.train_file_list)
    test_list = load_img_id_list(config.test_file_list)

    config.save_path = config.save_path + '_' + args.model_name
    assert (os.path.isdir(config.save_path))

    config.hidden_size = 500
    config.is_multi = True
    config.reward_con = args.reward_con
    restore_id = args.restore_id
    assert (restore_id > 0)

    cur_dataset = dataprovider(train_list,
                               test_list,
                               config.img_feat_dir,
                               config.sen_dir,
                               config.vocab_size,
                               reward_con=config.reward_con,
                               batch_size=config.batch_size)

    model = ground_model(config)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)

    with tf.Graph().as_default():
        loss, loss_vec, logits, rwd_pred, loss_rwd = model.build_model()
        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # Run the Op to initialize the variables.
        saver = tf.train.Saver(max_to_keep=20)
        feed_dict = update_feed_dict(cur_dataset, model, False)

        print 'Restore model_%d' % restore_id
        cur_dataset.is_save = False
        saver.restore(
            sess, './model/%s/model_%d.ckpt' % (config.save_path, restore_id))

        print "-----------------------------------------------"
        eval_accu = run_eval(sess, cur_dataset, model, logits, feed_dict)
        print "-----------------------------------------------"
Example #3
0
File: run.py  Project: wpyemperor/swc_cnn
#from pyzem.swc import swc
import swc
import dataprovider

import os, os.path
import numpy as np
import scipy.misc
import transfer
# Step 1: preprocess raw cell data into feature files.
print('start data preprocessing')
tree = swc.SwcTree()
dp = dataprovider.dataprovider()
# Load the SWC tree into the provider, then derive per-node quantities
# (level, length, radius, parent location, angle, type).
dp.load(tree)
dp.get_level_list()
dp.get_length_t()
dp.get_radius_t()
dp.get_plocation_t()
dp.get_angle_t()
dp.get_type_t()
# Transform each raw cell-type directory; presumably writes converted
# files under ./data/<cell type> (consumed by the loop below) —
# TODO confirm tran_file's output location.
dp.tran_file('../celldata/GABAergic')
dp.tran_file('../celldata/granule01')
dp.tran_file('../celldata/nitrergic')
dp.tran_file('../celldata/pyrimidal')
print("step1 complete!")
# Directories scanned by the post-processing loop that follows.
pathdir = [
    'data/GABAergic', 'data/granule01', 'data/nitrergic', 'data/pyrimidal'
]
for i in range(0, len(pathdir)):
    filelist = os.listdir(pathdir[i])
    for j in range(0, len(filelist)):
        path = os.path.join(pathdir[i], filelist[j])
        if os.path.isfile(path):
Example #4
0
File: train.py  Project: zr8091/KAC-Net
def run_training():
    """Train the grounding model, saving a checkpoint per epoch, logging
    losses to a file, and running a full evaluation every 600 steps.

    Configuration comes from ``Config`` plus the global ``args``
    (``model_name``, ``restore_id``, ``knowledge``, ``pretrain_id``).
    """
    train_list = []
    test_list = []
    config = Config()
    train_list = load_img_id_list(config.train_file_list)
    test_list = load_img_id_list(config.test_file_list)

    #Directory to save model Info
    config.save_path = config.save_path + '_' + args.model_name
    restore_id = args.restore_id
    config.knowledge = args.knowledge
    if restore_id > 0:
        # NOTE(review): the '_restore_%d' suffix is appended before the
        # restore below, so the checkpoint path also uses the suffixed
        # directory — confirm the checkpoint actually lives there.
        config.save_path = config.save_path + '_restore_%d' % restore_id
    if not os.path.isdir(config.save_path):
        print 'Save models into %s' % config.save_path
        os.mkdir(config.save_path)

    #Log File (Python 2 ``open(..., 'w', 0)`` opens it unbuffered)
    log_file = config.log_file + '_' + args.model_name + '.log'
    if restore_id > 0:
        log_file = config.log_file + '_' + args.model_name + '_restore_%d.log' % restore_id
    log = open(log_file, 'w', 0)

    #Initialize the paths and parameters for the current dataset
    cur_dataset = dataprovider(train_list,
                               test_list,
                               config.img_feat_dir,
                               config.global_feat_dir,
                               config.sen_dir,
                               config.vocab_size,
                               knowledge=config.knowledge,
                               phrase_len=config.phrase_len,
                               batch_size=config.batch_size)
    is_train = True
    #Initialize ground model train instance
    model = ground_model(is_train, config)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)

    with tf.Graph().as_default():
        #Build the model
        total_loss, train_op, att_logits, dec_logits, w_loss, l_loss, v_loss = model.build_model(
        )
        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # Run the Op to initialize the variables.
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver(max_to_keep=200)
        duration = 0.0

        # Warm-start weights from a pretrained model; a later full
        # checkpoint restore (if requested) overwrites them.
        initialize_from_pretrain(sess, config, args.pretrain_id)

        if restore_id > 0:
            saver.restore(
                sess,
                './model/%s/model_%d.ckpt' % (config.save_path, restore_id))

        for step in xrange(config.max_step):
            start_time = time.time()
            feed_dict = update_feed_dict(cur_dataset, model, is_train)

            _, cur_tot_loss, cur_l_loss, cur_v_loss, cur_logits = sess.run(
                [train_op, total_loss, l_loss, v_loss, att_logits],
                feed_dict=feed_dict)
            duration += time.time() - start_time

            # The dataset flags epoch boundaries via is_save: checkpoint once
            # per epoch, then clear the flag.
            if cur_dataset.is_save:
                print 'Save model_%d into %s' % (cur_dataset.epoch_id,
                                                 config.save_path)
                saver.save(
                    sess, '%s/model_%d.ckpt' %
                    (config.save_path, cur_dataset.epoch_id))
                cur_dataset.is_save = False

            # Print batch accuracy and average step time every 10 steps.
            if step % 10 == 0:
                cur_accu = eval_cur_batch(feed_dict[model.bbx_label],
                                          cur_logits,
                                          is_train=is_train)
                print 'Step %d: loss = %.4f, l_loss = %.4f, v_loss = %.4f, accu = %.4f (%.4f s)' % (
                    step, cur_tot_loss, cur_l_loss, cur_v_loss, cur_accu,
                    duration / 10.0)

                duration = 0.0

            # Full evaluation pass every 600 steps; result goes to the log.
            if (step % 600) == 0:
                print "-----------------------------------------------"
                eval_accu = run_eval(sess, cur_dataset, model, att_logits,
                                     feed_dict)
                log.write('%d/%d: %.4f, %.4f, %.4f, %.4f\n' %
                          (step + 1, cur_dataset.epoch_id, cur_tot_loss,
                           cur_l_loss, cur_v_loss, eval_accu))
                print "-----------------------------------------------"
                # Presumably run_eval alters these — reset to training
                # defaults; TODO confirm against run_eval's implementation.
                model.batch_size = config.batch_size
                cur_dataset.is_save = False

    log.close()
Example #5
0
class Tester:
    """Thin wrapper that holds a model instance for training/testing runs."""

    def __init__(self, model):
        # Keep a reference to the wrapped model; callers reach it as
        # ``tester.model``.
        self.model = model


# Report whether TensorFlow sees a GPU; training falls back to CPU otherwise.
device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
    print('Found GPU at:{}!'.format(device_name))
else:
    print('Tensorflow have not found GPU,run on CPU mode!')

# TFRecord shards for the training and validation splits (Windows paths).
train_set = [".\\tfrecord\\train.tfrecord"]
val_set = [".\\tfrecord\\val.tfrecord"]

from dataprovider import dataprovider

# Create data providers for training and validation.  Positional args are
# presumably (files, num_classes, batches, image shape, sample count,
# batch size) — TODO confirm against dataprovider's signature.
dataset_train = dataprovider(train_set, 61, 1037, [224, 224, 3], 32739, 40)
dataset_val = dataprovider(val_set, 61, 100, [224, 224, 3], 4982, 40)

import vgg16
import vgg19

# Wrap a VGG16 model initialized from the vgg_16 checkpoint; learning
# rate 0.01, 400 epochs (per the VGG16 constructor's ordering — verify).
tester = Tester(
    vgg16.VGG16(dataset_train, dataset_val, 0.01, 400,
                os.path.join('.', 'checkpoints', 'vgg_16.ckpt')))
#tester = Tester(vgg19.VGG19(dataset,0.01,400,".\\checkpoints\\vgg_19.ckpt",))

# Kick off training for all epochs.
tester.model.train_all_epochs()
Example #6
0
def run_training():
    """Train the reward-based grounding model, checkpointing per epoch,
    logging losses, and evaluating every 600 steps.

    Configuration comes from ``Config`` plus the global ``args``
    (``model_name``, ``reward_con``, ``restore_id``).
    """
    train_list = []
    test_list = []
    config = Config()
    train_list = load_img_id_list(config.train_file_list)
    test_list = load_img_id_list(config.test_file_list)

    # Directory to save model checkpoints into.
    config.save_path = config.save_path + '_' + args.model_name
    if not os.path.isdir(config.save_path):
        print 'Save models into %s'%config.save_path
        os.mkdir(config.save_path)
    # Log file (Python 2 ``open(..., 'w', 0)`` opens it unbuffered).
    log_file = config.log_file + '_' + args.model_name + '.log'
    config.hidden_size = 500
    config.is_multi = True
    config.reward_con = args.reward_con
    log = open(log_file, 'w', 0)
    restore_id = args.restore_id

    cur_dataset = dataprovider(train_list, test_list, config.img_feat_dir, config.sen_dir, config.vocab_size,
                                reward_con=config.reward_con, batch_size=config.batch_size)

    model = ground_model(config)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)

    with tf.Graph().as_default():
        loss, loss_vec, logits, rwd_pred, loss_rwd = model.build_model()
        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # Run the Op to initialize the variables.
        saver = tf.train.Saver(max_to_keep=20)
        duration = 0.0

        if restore_id > 0:
            print 'Restore model_%d'%restore_id
            # cur_dataset.epoch_id = restore_id
            cur_dataset.is_save = False
            # Snapshot the variable set BEFORE build_train_op so that only
            # the variables it creates (e.g. optimizer slots) are
            # initialized afterwards; the restored weights are untouched.
            temp_vars = set(tf.global_variables())
            saver.restore(sess, './model/%s/model_%d.ckpt'%(config.save_path, restore_id))   

            # NOTE(review): this branch passes model.reward_w where the
            # scratch branch passes loss_rwd — confirm this difference is
            # intentional.
            model.train_op = model.build_train_op(loss, loss_vec, model.reward_w, rwd_pred) 
            train_op = model.train_op 
            sess.run(tf.variables_initializer(set(tf.global_variables()) - temp_vars))      
        else:
            print 'Train from scratch'
            cur_dataset.is_save = False
            model.train_op = model.build_train_op(loss, loss_vec, loss_rwd, rwd_pred) 
            train_op = model.train_op
            # Fresh run: initialize every variable, including optimizer state.
            init = tf.global_variables_initializer()
            sess.run(init)   

        for step in xrange(config.max_step):
            start_time = time.time()
            feed_dict = update_feed_dict(cur_dataset, model, True)
            _,loss_value,loss_vec_value, cur_logits = sess.run([train_op, loss, loss_vec, logits], feed_dict=feed_dict)
            duration += time.time()-start_time

            # The dataset flags epoch boundaries via is_save: checkpoint once
            # per epoch, then clear the flag.
            if cur_dataset.is_save:
                print 'Save model_%d into %s'%(cur_dataset.epoch_id, config.save_path)
                saver.save(sess, '%s/model_%d.ckpt'%(config.save_path, cur_dataset.epoch_id))
                cur_dataset.is_save = False

            # Print batch accuracy and average step time every 10 steps.
            if step%10 == 0:
                cur_accu = eval_cur_batch(feed_dict[model.bbx_label], cur_logits, True)
                print 'Step %d: loss = %.4f, accu = %.4f (%.4f sec)'%(step, loss_value, cur_accu, duration/10.0)                
                duration = 0.0

            # Full evaluation pass every 600 steps; result goes to the log.
            if ((step)%600)==0:
                print "-----------------------------------------------"
                eval_accu = run_eval(sess, cur_dataset, model, logits, feed_dict)
                log.write('%d/%d: %.4f, %.4f\n'%(step+1, cur_dataset.epoch_id, loss_value, eval_accu))
                print "-----------------------------------------------"
                # Presumably run_eval alters these — reset to training
                # defaults; TODO confirm against run_eval's implementation.
                model.batch_size = config.batch_size
                cur_dataset.is_save = False

    log.close()