Example #1

import sys

import numpy as np
import tensorflow as tf

# inputs, b_batch_, labels_, loss, train_step, config, dataset_path,
# and loader are assumed to be defined earlier in the file.
with tf.Session() as sess:
    train_writer = tf.summary.FileWriter('/tmp/yolo-tf/train/train',
                                         sess.graph)
    merged = tf.summary.merge_all()

    sess.run(tf.global_variables_initializer())
    # Count the batches once instead of re-reading train.txt every epoch.
    with open(dataset_path + "train.txt", "r") as f:
        num_batches = len(f.readlines()) // config["BATCH_SIZE"]
    for i in range(config["EPOCH_SIZE"]):
        print("epoch number: {}".format(i))
        for j in range(num_batches):
            print("training on batch {}".format(j))
            images, b_batch, labels = loader.next_batch(config["BATCH_SIZE"],
                                                        print_img_files=False)
            # Fetch the summary, the train op, and the loss in a single
            # run so each batch is evaluated only once.
            summary, _, loss_val = sess.run(
                [merged, train_step, loss],
                feed_dict={
                    inputs: images,
                    b_batch_: b_batch,
                    labels_: labels
                })
            if np.isnan(loss_val):
                sys.exit("loss diverged to NaN")
            # Use a global step so summaries from later epochs do not
            # overwrite those from earlier ones.
            train_writer.add_summary(summary, i * num_batches + j)
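
The summaries written by train_writer above can be inspected with TensorBoard by pointing its --logdir flag at /tmp/yolo-tf/train.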
Example #2
import sys

import tensorflow as tf

# x, labels, ls, train_step, saver, config, dataset_path, and loader
# are assumed to be defined earlier in the file.
if sys.argv[1] == "train":
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('/tmp/yolo-tf/train/train',
                                             sess.graph)
        merged = tf.summary.merge_all()

        sess.run(tf.global_variables_initializer())
        # Count the batches once instead of re-reading train.txt every epoch.
        with open(dataset_path + "train.txt", "r") as f:
            num_batches = len(f.readlines()) // config["BATCH_SIZE"]
        for i in range(config["EPOCH_SIZE"]):
            print("epoch number: {}".format(i))
            for j in range(num_batches):
                print("training on batch {}".format(j))
                images, labels_ = loader.next_batch(config["BATCH_SIZE"],
                                                    print_img_files=False)
                # Fetch the summary, the train op, and the loss in a
                # single run so each batch is evaluated only once.
                summary, _, ls_val = sess.run([merged, train_step, ls],
                                              feed_dict={
                                                  x: images,
                                                  labels: labels_
                                              })
                print("loss: {}".format(ls_val))
                # Use a global step so summaries from later epochs do
                # not overwrite those from earlier ones.
                train_writer.add_summary(summary, i * num_batches + j)
            # Rewind the loader so the next epoch starts from the first batch.
            loader.set_batch_ptr(0)
            if i % 100 == 0:
                save_path = saver.save(
                    sess,
                    config["MODEL_SAVE_PATH"] + "model_{}.ckpt".format(i))
                print("Model at epoch {} saved at {}".format(i, save_path))
Example #3
import os
import sys

import tensorflow as tf

import config.parameters as p
from loss import losses
from data.loader import Loader
# Arch is assumed to be imported from the project's architecture module.

dataset_path = sys.argv[1]
config = p.getParams()
loader = Loader(dataset_path, config, "bbox")

arch = Arch(config)
preds = arch.darknet()
x = arch.getX()

# Build the loss and the optimizer once, before the training loop. The
# original rebuilt both (and re-ran the variable initializer) on every
# batch, which discards everything learned so far. Labels are fed
# through a placeholder; its shape is left unspecified here because it
# depends on the label encoding.
labels_ph = tf.placeholder(tf.float32)
ls = losses.yolo_loss(preds, config, labels_ph)
train_step = tf.train.AdamOptimizer(1e-4).minimize(ls)
# Create the Saver after the optimizer so Adam's slot variables are
# included in the checkpoints.
saver = tf.train.Saver()

if not os.path.exists(config["MODEL_SAVE_PATH"]):
    os.mkdir(config["MODEL_SAVE_PATH"])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Count the batches once instead of re-reading train.txt every epoch.
    with open(dataset_path + "train.txt", "r") as f:
        num_batches = len(f.readlines()) // config["BATCH_SIZE"]
    for i in range(config["EPOCH_SIZE"]):
        for j in range(num_batches):
            print("training on batch {}".format(j))
            images, labels = loader.next_batch(config["BATCH_SIZE"])
            # Run the train op and fetch the loss in a single call.
            _, ls_val = sess.run([train_step, ls],
                                 feed_dict={x: images, labels_ph: labels})
            print("loss: {}".format(ls_val))
        if i % 100 == 0:
            save_path = saver.save(
                sess,
                config["MODEL_SAVE_PATH"] + "model_{}.ckpt".format(i))
            print("Model at epoch {} saved at {}".format(i, save_path))