Code Example #1
def eger(cfg):
    # Debug helper: pull one batch and run a single eager-mode loss
    # computation outside the graph-building path.
    gen = data_gen.get_batch(batch_size=cfg.batch_size)

    images, true_box, true_label = next(gen)
    print(true_label)
    # Encode ground-truth boxes and labels into per-anchor regression
    # and classification targets.
    loct, conft = np_utils.get_loc_conf(true_box, true_label, batch_size=cfg.batch_size)
    get_loss(images, conft, loct)
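As the commented-out calls at the end of the file hint, this debug helper is meant to run with eager execution turned on. A minimal invocation sketch (assuming the snippets' `config` module, which exposes `batch_size`):

import tensorflow as tf

tf.enable_eager_execution()  # must run before any graph ops are created
eger(config)                 # one batch through target encoding and the loss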
Code Example #2
def train():
    img = tf.placeholder(shape=[config.batch_size, config.Config['min_dim'], config.Config['min_dim'], 3], dtype=tf.float32)
    #ig = AddCoords(x_dim=512,y_dim=512)(img)
    # Total anchor count across the five feature-map levels.
    anchors_num = sum(
        [config.Config['feature_maps'][s] ** 2 * config.Config['aspect_num'][s] for s in range(5)])
    loc = tf.placeholder(shape=[config.batch_size, anchors_num, 4], dtype=tf.float32)
    conf = tf.placeholder(shape=[config.batch_size, anchors_num], dtype=tf.float32)
    pred_loc, pred_confs, vbs = retinanet.model(img, config)
    train_tensors = get_loss(conf, loc, pred_loc, pred_confs, config)
    # NOTE: this generator is never consumed; batches are read from the
    # global queue `q` inside the training loop below.
    gen = data_gen.get_batch_inception(batch_size=config.batch_size,
                                       image_size=config.Config['min_dim'],
                                       max_detect=50)

    global_step = slim.get_or_create_global_step()
    lr = tf.train.exponential_decay(
        learning_rate=0.001,
        global_step=global_step,
        decay_steps=40000,
        decay_rate=0.7,
        staircase=True)

    tf.summary.scalar('lr', lr)
    sum_op = tf.summary.merge_all()

    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
    train_op = slim.learning.create_train_op(train_tensors, optimizer)
    # Overwrite `vbs` from the model: restore only the ResNet-50 backbone
    # weights, skipping the optimizer's Momentum slot variables.
    vbs = []
    for s in slim.get_variables():
        if 'resnet_v2_50' in s.name and 'Momentum' not in s.name:
            print(s.name)
            vbs.append(s)

    saver = tf.train.Saver(vbs)

    def restore(sess):
        saver.restore(sess, config.check_dir)

    sv = tf.train.Supervisor(logdir=config.save_dir, summary_op=None, init_fn=restore)

    with sv.managed_session() as sess:
        for step in range(200000):
            # Simple console progress ticker.
            print('       ' + ' '.join(['*'] * (step % 10)))
            # `q` is a batch queue assumed to be filled by a separate
            # data-loading process elsewhere in the project.
            images, true_box, true_label = q.get()

            loct, conft = np_utils.get_loc_conf(true_box, true_label,
                                                batch_size=config.batch_size,
                                                cfg=config.Config)
            feed_dict = {img: images, loc: loct, conf: conft}

            # `step` is rebound here to the graph's global_step value.
            ls, step = sess.run([train_op, global_step], feed_dict=feed_dict)

            if step % 10 == 0:
                print('step:{} class_loss:{} loc_loss:{}'.format(
                    step, ls[0], ls[1]))
                summaries = sess.run(sum_op, feed_dict=feed_dict)
                sv.summary_computed(sess, summaries)
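For intuition, with staircase=True the schedule above multiplies the learning rate by 0.7 once per 40000-step interval. A plain-Python sketch of the equivalent computation:

def staircase_lr(step, base_lr=0.001, decay_steps=40000, decay_rate=0.7):
    # Mirrors tf.train.exponential_decay(..., staircase=True):
    # integer division makes the rate drop in discrete steps.
    return base_lr * decay_rate ** (step // decay_steps)

# staircase_lr(0)     -> 0.001
# staircase_lr(40000) -> 0.0007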
Code Example #3
def train():
    img = tf.placeholder(shape=[
        config.batch_size, config.Config['min_dim'], config.Config['min_dim'],
        3
    ],
                         dtype=tf.float32)
    # Total anchor count across the six feature-map levels.
    anchors_num = sum([
        config.Config['feature_maps'][s]**2 * config.Config['aspect_num'][s]
        for s in range(6)
    ])

    loc = tf.placeholder(shape=[config.batch_size, anchors_num, 4],
                         dtype=tf.float32)
    conf = tf.placeholder(shape=[config.batch_size, anchors_num],
                          dtype=tf.float32)

    pred_loc, pred_confs, vbs = inception_500_ince.inception_v2_ssd(
        img, config)

    train_tensors, sum_op = get_loss(conf, loc, pred_loc, pred_confs, config)

    # Unused here; batches come from the global queue `q` below.
    gen = data_gen.get_batch_inception(batch_size=config.batch_size,
                                       image_size=config.Config['min_dim'],
                                       max_detect=50)
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
    train_op = slim.learning.create_train_op(train_tensors, optimizer)

    saver = tf.train.Saver(vbs)

    def restore(sess):
        saver.restore(sess, '/home/dsl/all_check/inception_v2.ckpt')

    sv = tf.train.Supervisor(logdir='/home/dsl/all_check/face_detect/voc-1',
                             summary_op=None,
                             init_fn=restore)

    with sv.managed_session() as sess:
        for step in range(1000000000):

            images, true_box, true_label = q.get()

            loct, conft = np_utils.get_loc_conf(true_box,
                                                true_label,
                                                batch_size=config.batch_size,
                                                cfg=config.Config)
            feed_dict = {img: images, loc: loct, conf: conft}

            ls = sess.run(train_op, feed_dict=feed_dict)
            if step % 10 == 0:
                summaries = sess.run(sum_op, feed_dict=feed_dict)
                sv.summary_computed(sess, summaries)
                print(ls)
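tf.train.Supervisor is deprecated in TF 1.x in favor of tf.train.MonitoredTrainingSession. A minimal sketch of the same restore-then-train setup with the newer API, assuming `saver` and `restore` are built as in the snippet above:

import tensorflow as tf

def make_session(checkpoint_dir, saver, restore):
    scaffold = tf.train.Scaffold(
        saver=saver,
        # Called only when no checkpoint exists in checkpoint_dir,
        # mirroring Supervisor's init_fn behaviour.
        init_fn=lambda scaffold, sess: restore(sess))
    return tf.train.MonitoredTrainingSession(
        checkpoint_dir=checkpoint_dir,
        scaffold=scaffold,
        save_summaries_steps=None)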
Code Example #4
def train():
    img = tf.placeholder(shape=[config.batch_size, config.Config['min_dim'], config.Config['min_dim'], 3], dtype=tf.float32)
    anchors_num = sum(
        [config.Config['feature_maps'][s] ** 2 * config.Config['aspect_num'][s] for s in range(6)])

    loc = tf.placeholder(shape=[config.batch_size, anchors_num, 4], dtype=tf.float32)
    conf = tf.placeholder(shape=[config.batch_size, anchors_num], dtype=tf.float32)

    pred_loc, pred_confs, vbs = inceptionv3_500_ince.inception_v2_ssd(img, config)

    train_tensors = get_loss(conf, loc, pred_loc, pred_confs, config)
    global_step = tf.train.get_or_create_global_step()

    # Exponentially decaying learning rate.
    lr = tf.train.exponential_decay(
        learning_rate=0.001,
        global_step=global_step,
        decay_steps=20000,
        decay_rate=0.7,
        staircase=True)
    tf.summary.scalar('lr', lr)
    sum_op = tf.summary.merge_all()

    gen = data_gen.get_batch_inception(batch_size=config.batch_size,
                                       image_size=config.Config['min_dim'],
                                       max_detect=50)
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
    train_op = slim.learning.create_train_op(train_tensors, optimizer)

    saver = tf.train.Saver(vbs)

    def restore(sess):
        saver.restore(sess, '/home/dsl/all_check/inception_v3.ckpt')

    sv = tf.train.Supervisor(logdir='/home/dsl/all_check/face_detect/voc-v32', summary_op=None, init_fn=restore)

    with sv.managed_session() as sess:
        for step in range(1000000000):

            images, true_box, true_label = q.get()

            loct, conft = np_utils.get_loc_conf(true_box, true_label,
                                                batch_size=config.batch_size,
                                                cfg=config.Config)
            feed_dict = {img: images, loc: loct, conf: conft}
            t = time.time()
            ls, step = sess.run([train_op, global_step], feed_dict=feed_dict)
            if step % 10 == 0:
                # Report per-step wall-clock time and the current loss.
                print(time.time() - t)
                summaries = sess.run(sum_op, feed_dict=feed_dict)
                sv.summary_computed(sess, summaries)
                print(ls)
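Several of the loops above read batches from a global queue `q` that the snippets never define. One way such a producer might look, as a purely illustrative sketch using multiprocessing (the real project may fill the queue differently):

import multiprocessing

q = multiprocessing.Queue(maxsize=10)

def producer(queue, gen):
    # Push (images, true_box, true_label) tuples from the data
    # generator into the shared queue for the training loop.
    for batch in gen:
        queue.put(batch)

# multiprocessing.Process(target=producer, args=(q, gen), daemon=True).start()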
Code Example #5
File: train.py  Project: dsl2009/tf-dsl-obj-detect
def train():
    img = tf.placeholder(
        shape=[config.batch_size, cfg["min_dim"], cfg["min_dim"], 3], dtype=tf.float32
    )
    anchors_num = sum(
        [cfg["feature_maps"][s] ** 2 * cfg["aspect_num"][s] for s in range(6)]
    )

    loc = tf.placeholder(shape=[config.batch_size, anchors_num, 4], dtype=tf.float32)
    conf = tf.placeholder(shape=[config.batch_size, anchors_num], dtype=tf.float32)

    pred_loc, pred_confs, vbs = mobile.nana_mobile(img, config)

    train_tensors, sum_op = get_loss(conf, loc, pred_loc, pred_confs, config)

    gen = data_gen.get_batch(batch_size=config.batch_size, image_size=cfg["min_dim"])
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    train_op = slim.learning.create_train_op(train_tensors, optimizer)

    saver = tf.train.Saver(vbs)

    def restore(sess):
        saver.restore(sess, "/home/dsl/all_check/nasnet-a_mobile_04_10_2017/model.ckpt")

    sv = tf.train.Supervisor(
        logdir="/home/dsl/all_check/face_detect/nana", summary_op=None, init_fn=restore
    )

    with sv.managed_session() as sess:
        for step in range(1000000000):

            images, true_box, true_label = next(gen)
            loct, conft = np_utils.get_loc_conf(
                true_box, true_label, batch_size=config.batch_size, cfg=cfg
            )
            feed_dict = {img: images, loc: loct, conf: conft}

            ls = sess.run(train_op, feed_dict=feed_dict)
            if step % 10 == 0:
                summaries = sess.run(sum_op, feed_dict=feed_dict)
                sv.summary_computed(sess, summaries)
                print(ls)
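A quick worked check of the anchor-count formula shared by these examples, using illustrative feature-map sizes and per-cell anchor counts (not the project's actual configuration):

# Hypothetical values for illustration only.
cfg_demo = {
    "feature_maps": [64, 32, 16, 8, 4, 2],
    "aspect_num": [4, 6, 6, 6, 4, 4],
}
anchors_num = sum(cfg_demo["feature_maps"][s] ** 2 * cfg_demo["aspect_num"][s]
                  for s in range(6))
print(anchors_num)  # 16384 + 6144 + 1536 + 384 + 64 + 16 = 24528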
Code Example #6
def train(cfg):
    img = tf.placeholder(shape=[cfg.batch_size, 300, 300, 3], dtype=tf.float32)
    #boxs = tf.placeholder(shape=[batch_size, 50, 4], dtype=tf.float32)
    #label = tf.placeholder(shape=[batch_size, 50], dtype=tf.int32)
    # 7512 is the total anchor count hard-coded for the 300x300 input.
    loc = tf.placeholder(shape=[cfg.batch_size, 7512, 4], dtype=tf.float32)
    conf = tf.placeholder(shape=[cfg.batch_size, 7512], dtype=tf.float32)

    pred_loc, pred_confs, vbs = model(img)

    train_tensors, sum_op = get_loss(conf, loc, pred_loc, pred_confs)

    gen = data_gen.get_batch(batch_size=cfg.batch_size)
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
    train_op = slim.learning.create_train_op(train_tensors, optimizer)

    saver = tf.train.Saver(vbs)

    def restore(sess):
        saver.restore(sess, '/home/dsl/all_check/vgg_16.ckpt')

    sv = tf.train.Supervisor(logdir='/home/dsl/all_check/face_detect', summary_op=None, init_fn=restore)

    with sv.managed_session() as sess:
        for step in range(1000000000):

            images, true_box, true_label = next(gen)
            loct, conft = np_utils.get_loc_conf(true_box, true_label,
                                                batch_size=cfg.batch_size)
            feed_dict = {img: images, loc: loct, conf: conft}

            ls = sess.run(train_op, feed_dict=feed_dict)
            if step % 10 == 0:
                summaries = sess.run(sum_op, feed_dict=feed_dict)
                sv.summary_computed(sess, summaries)
                print(ls)
#train()
#tf.enable_eager_execution()
#eger()

#detect('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/VOCdevkit/VOCdevkit/VOC2007/JPEGImages/000133.jpg')
#video()