Example #1
def load_train_val_data(train_path, val_path):
    parser_train = Parser(ANCHORS, NUM_CLASSES, image_size=(416, 416))
    parser_test = Parser(ANCHORS, NUM_CLASSES)
    trainset = dataset(parser_train,
                       train_path,
                       BATCH_SIZE,
                       shuffle=SHUFFLE_SIZE,
                       multi_image_size=False)
    testset = dataset(parser_test, val_path, BATCH_SIZE, shuffle=None)
    return trainset, testset
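A minimal consumption sketch (ours, not from the project), following the session pattern of the later examples; train_path and val_path are whatever TFRecord paths the caller passes in:

trainset, testset = load_train_val_data(train_path, val_path)
example = trainset.get_next()  # (images, y_true_13, y_true_26, y_true_52)
images, *y_true = example      # same unpacking as Examples #4 and #8
with tf.Session() as sess:
    batch = sess.run(example)  # pulls one decoded batch from the pipeline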
Example #2
File: train.py  Project: BadUncleBoy/YoloAt
def network_input(choice):
    parser         = Parser(int(choice))
    trainset       = dataset(parser, Config.Train_tfrecord, Config.BATCH_SIZE, shuffle=Config.SHUFFLE_SIZE)
    validset       = dataset(parser, Config.Valid_tfrecord, 1, shuffle=None)
    example        = tf.cond(is_training, lambda: trainset.get_next(), lambda: validset.get_next())
    images, y_true = example
    return images, y_true
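Note that is_training is referenced inside network_input but must already exist as a module-level placeholder. A minimal sketch (ours) of driving the tf.cond switch; the choice value is hypothetical:

is_training = tf.placeholder(tf.bool, name="phase_train")
images, y_true = network_input(choice=0)
with tf.Session() as sess:
    train_imgs = sess.run(images, feed_dict={is_training: True})   # reads from trainset
    valid_imgs = sess.run(images, feed_dict={is_training: False})  # reads from validset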
Example #3
    def show_example(self):
        sess = tf.Session()
        classes = os.listdir(self.source_dir[:-len(".tfrecords") or None])

        train_tfrecord = self.source_dir
        anchors = utils.get_anchors(self.anchor_dir, self.img_h, self.img_w)

        parser = Parser(image_h=self.img_h,
                        image_w=self.img_w,
                        anchors=anchors,
                        num_classes=self.num_classes,
                        cell_size=self.cell_size,
                        debug=True)
        trainset = dataset(parser, train_tfrecord, 1, shuffle=1)

        is_training = tf.placeholder(tf.bool)
        example = trainset.get_next()

        image, boxes = sess.run(example)
        image, boxes = image[0], boxes[0]

        n_box = len(boxes)
        print(boxes)
        image = np.repeat(image, 3, axis=2)  # replicate the single-channel image to 3 channels for drawing
        for i in range(n_box):
            image = cv2.rectangle(
                image, (int(float(boxes[i][0])), int(float(boxes[i][1]))),
                (int(float(boxes[i][2])), int(float(boxes[i][3]))),
                (255, 0, 0), 1)
            label = str(int(float(boxes[i][4])))
            image = cv2.putText(
                image, label,
                (int(float(boxes[i][0])), int(float(boxes[i][1]))),
                cv2.FONT_HERSHEY_SIMPLEX, .6, (0, 255, 0), 1, 2)

        image = Image.fromarray(np.uint8(image * 255))
        image.show()
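The parser here is built with debug=True; contrasting with Example #10 (debug=False) suggests the two modes yield different tuples:

# debug=True  -> (image, boxes): raw pixels plus one [x0, y0, x1, y1, class] row per object
# debug=False -> (image, y_true_13, y_true_26, y_true_52): targets encoded per scale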
Example #4
IMAGE_H, IMAGE_W = 416, 416
BATCH_SIZE       = 8
STEPS            = 25000
LR               = 0.001 # if the loss becomes NaN, try 0.0005 or 0.0001
DECAY_STEPS      = 100
DECAY_RATE       = 0.9
SHUFFLE_SIZE     = 200
CLASSES          = utils.read_coco_names(class_name_path)
ANCHORS          = utils.get_anchors('data/voc_anchors.txt', IMAGE_H, IMAGE_W)
NUM_CLASSES      = len(CLASSES)
EVAL_INTERNAL    = 100
SAVE_INTERNAL    = 500



parser   = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=SHUFFLE_SIZE)
testset  = dataset(parser, test_tfrecord , BATCH_SIZE, shuffle=None)

is_training = tf.placeholder(tf.bool)
example = tf.cond(is_training, lambda: trainset.get_next(), lambda: testset.get_next())

images, *y_true = example
model = yolov3.yolov3(NUM_CLASSES, ANCHORS)

with tf.variable_scope('yolov3'):
    pred_feature_map = model.forward(images, is_training=is_training)
    loss             = model.compute_loss(pred_feature_map, y_true)
    y_pred           = model.predict(pred_feature_map)

tf.summary.scalar("loss/coord_loss",   loss[1])
Example #5
import cv2
import numpy as np
import tensorflow as tf
from core import utils
from PIL import Image
from core.dataset import Parser, dataset
sess = tf.Session()

BATCH_SIZE = 1
SHUFFLE_SIZE = 1
CLASSES = utils.read_coco_names('./data/class.names')
ANCHORS = utils.get_anchors('./data/anchors.txt')
NUM_CLASSES = len(CLASSES)
TRAIN_TFRECORD = "./data/train_data/val.tfrecords"
TEST_TFRECORD = "./data/val_data/val.tfrecords"
parser_train = Parser(ANCHORS, NUM_CLASSES)
parser_test = Parser(ANCHORS, NUM_CLASSES)
trainset = dataset(parser_train, TRAIN_TFRECORD, BATCH_SIZE, shuffle=SHUFFLE_SIZE, multi_image_size=False)
testset = dataset(parser_test, TEST_TFRECORD, BATCH_SIZE, shuffle=None)
is_training = tf.placeholder(tf.bool)
example = testset.get_next()

for l in range(1):
    res = sess.run(example)
    image = res[0][0] * 255
    y_true = res[1:]
    boxes = utils.decode_gtbox(y_true)
    n_box = len(boxes)
    for i in range(n_box):
        image = cv2.rectangle(image,(int(float(boxes[i][0])),
                                     int(float(boxes[i][1]))),
                                    (int(float(boxes[i][2])),
                                     int(float(boxes[i][3]))), (255,0,0), 1)
    Image.fromarray(np.uint8(image)).show()  # image was already scaled by 255 above
Example #6
import cv2            # used below but missing from the original snippet
import numpy as np    # used below but missing from the original snippet
import tensorflow as tf
from core import utils
from PIL import Image
from core.dataset import Parser, dataset
sess = tf.Session()

IMAGE_H, IMAGE_W = 416, 416
BATCH_SIZE = 1
SHUFFLE_SIZE = 1

train_tfrecord = "./raccoon_dataset/raccoon_*.tfrecords"
anchors        = utils.get_anchors('./data/raccoon_anchors.txt', IMAGE_H, IMAGE_W)
classes = utils.read_coco_names('./data/raccoon.names')
num_classes = len(classes)

parser   = Parser(IMAGE_H, IMAGE_W, anchors, num_classes, debug=True)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=SHUFFLE_SIZE)

is_training = tf.placeholder(tf.bool)
example = trainset.get_next()

for l in range(10):
    image, boxes = sess.run(example)
    image, boxes = image[0], boxes[0]

    n_box = len(boxes)
    for i in range(n_box):
        image = cv2.rectangle(image,(int(float(boxes[i][0])),
                                     int(float(boxes[i][1]))),
                                    (int(float(boxes[i][2])),
                                     int(float(boxes[i][3]))), (255,0,0), 1)
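    # Follow-up sketch (ours): display the annotated frame, assuming the
    # debug parser returns images scaled to [0, 1] as in Example #3.
    Image.fromarray(np.uint8(image * 255)).show()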
Example #7
import cv2            # needed by the drawing code below
import numpy as np
import tensorflow as tf
from PIL import Image
from core.dataset import dataset, Parser
from core import utils

INPUT_SIZE = 416
BATCH_SIZE = 1
EPOCHS = 313
SHUFFLE_SIZE = 1

sess = tf.Session()
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
train_tfrecord = "../COCO/tfrecords/coco_train0000.tfrecords"
anchors = utils.get_anchors('./data/coco_anchors.txt')

# Check that the images are resized correctly
parser = Parser(416, 416, anchors, num_classes, debug=True)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=None)
example = trainset.get_next()
# for l in range(100):
image, gt_boxes = sess.run(example)
image, gt_boxes = image[0], gt_boxes[0]

n_box = len(gt_boxes)
for i in range(n_box):
    image = cv2.rectangle(
        image, (int(float(gt_boxes[i][0])), int(float(gt_boxes[i][1]))),
        (int(float(gt_boxes[i][2])), int(float(gt_boxes[i][3]))), (255, 0, 0),
        2)

image = Image.fromarray(np.uint8(image))
image.show()  # display, as the parallel examples do
Example #8
import tensorflow as tf
from core import utils, yolov3   # utils and yolov3 were missing from the snippet's imports
from core.dataset import dataset, Parser

INPUT_SIZE = 416
BATCH_SIZE = 8
EPOCHS = 20000
LR = 0.0001
SHUFFLE_SIZE = 1

sess = tf.Session()
classes = utils.read_coco_names('./data/raccoon.names')
num_classes = len(classes)
train_tfrecord = "./raccoon_dataset/raccoon*.tfrecords"
anchors = utils.get_anchors('./data/raccoon_anchors.txt')
is_training = tf.placeholder(dtype=tf.bool, name="phase_train")

parser = Parser(416, 416, anchors, num_classes)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=100)
example = trainset.get_next()

images, *y_true = example
model = yolov3.yolov3(num_classes)
with tf.variable_scope('yolov3'):
    y_pred = model.forward(images, is_training=is_training)
    loss = model.compute_loss(y_pred, y_true)

optimizer = tf.train.AdamOptimizer(LR)
saver = tf.train.Saver(max_to_keep=2)

tf.summary.scalar("loss/coord_loss", loss[1])
tf.summary.scalar("loss/sizes_loss", loss[2])
tf.summary.scalar("loss/confs_loss", loss[3])
Example #9
    def train(self):
        ANCHORS = utils.get_anchors(self.anchors_path, self.img_h, self.img_w)

        parser = Parser(image_h=self.img_h,
                        image_w=self.img_w,
                        anchors=ANCHORS,
                        num_classes=self.num_classes)

        trainset = dataset(parser,
                           self.train_records,
                           self.batch_size,
                           shuffle=self.shuffle_size)
        testset = dataset(parser,
                          self.test_records,
                          self.batch_size,
                          shuffle=None)

        is_training = tf.placeholder(tf.bool)

        example = tf.cond(is_training, lambda: trainset.get_next(),
                          lambda: testset.get_next())

        images, y_true = example

        model = yolov3.yolov3(self.num_classes, ANCHORS)

        with tf.variable_scope('yolov3'):

            # Give the images to the network, and receive a prediction
            # feature map
            pred_feature_map = model.forward(images,
                                             is_training=is_training,
                                             n_filters_dn=self.n_filters_dn,
                                             n_strides_dn=self.n_strides_dn,
                                             n_ksizes_dn=self.n_ksizes_dn)

            loss = model.compute_loss(pred_feature_map, y_true,
                                      self.iou_threshold)
            y_pred = model.predict(pred_feature_map)

        tf.summary.scalar("loss/coord_loss", loss[1])
        tf.summary.scalar("loss/sizes_loss", loss[2])
        tf.summary.scalar("loss/confs_loss", loss[3])
        tf.summary.scalar("loss/class_loss", loss[4])

        # The step counter must not be trainable, or the optimizer would
        # treat it as a weight.
        global_step = tf.Variable(0,
                                  trainable=False,
                                  collections=[tf.GraphKeys.LOCAL_VARIABLES])

        write_op = tf.summary.merge_all()
        # `sess` is assumed to be created elsewhere in this class.
        writer_train = tf.summary.FileWriter("../../data/train_summary",
                                             sess.graph)
        writer_test = tf.summary.FileWriter("../../data/test_summary")

        update_vars = tf.contrib.framework.get_variables_to_restore(
            include=["yolov3/yolo-v3"])

        lr = tf.train.exponential_decay(self.learning_rate,
                                        global_step,
                                        decay_steps=self.decay_steps,
                                        decay_rate=self.decay_rate,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)

        # set dependencies for BN ops
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss[0],
                                          var_list=update_vars,
                                          global_step=global_step)

        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])

        saver = tf.train.Saver(max_to_keep=2)

        for step in range(self.steps):
            run_items = sess.run([train_op, write_op, y_pred, y_true] + loss,
                                 feed_dict={is_training: True})

            if (step + 1) % self.eval_internal == 0:
                train_rec_value, train_prec_value = utils.evaluate(
                    run_items[2], run_items[3])

            writer_train.add_summary(run_items[1], global_step=step)
            writer_train.flush()  # Flushes the event file to disk

            if (step + 1) % self.save_internal == 0:
                saver.save(sess,
                           save_path=self.checkpoint_path,
                           global_step=step + 1)

            if (step + 1) % self.print_every_n == 0:
                print(f"=> STEP {step+1} [TRAIN]:\tloss_xy: " +
                      f"{run_items[5]:.4f} \tloss_wh:{run_items[6]:.4f} \t" +
                      f"loss_conf:{run_items[7]:.4f} \tloss_class:" +
                      f"{run_items[8]:.4f}")

            run_items = sess.run([write_op, y_pred, y_true] + loss,
                                 feed_dict={is_training: False})

            if (step + 1) % self.eval_internal == 0:
                test_rec_value, test_prec_value = utils.evaluate(
                    run_items[1], run_items[2])
                print(f"\n{20*'='}> evaluation result <{20*'='}\n")
                print(f"=> STEP {step+1} [TRAIN]:\trecall:" +
                      f"{train_rec_value:.2f} \tprecision:" +
                      f"{train_prec_value:.4f}")
                print(f"=> STEP {step+1} [VALID]:\trecall:" +
                      f"{test_rec_value:.2f} \tprecision:" +
                      f"{test_prec_value:.4f}")
                print(f"\n{20*'='}> evaluation result <{20*'='}\n")

            writer_test.add_summary(run_items[0], global_step=step)
            writer_test.flush()  # Flushes the event file to disk
Example #10
import tensorflow as tf
from core import utils
from core.dataset import dataset, Parser

sess = tf.Session()
BATCH_SIZE = 8  # not defined in this snippet; value assumed
classes = utils.read_coco_names('./data/raccoon.names')
num_classes = len(classes)
train_tfrecord = "./raccoon_dataset/raccoon*.tfrecords"
anchors = utils.get_anchors('./data/raccoon_anchors.txt')

# Check that the images are resized correctly
# parser   = Parser(416, 416, anchors, num_classes, debug=True)
# trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=100)
# example  = trainset.get_next()
# for l in range(20):
#     image, boxes = sess.run(example)
#     image, boxes = image[0], boxes[0]
#
#     n_box = len(boxes)
#     for i in range(n_box):
#         image = cv2.rectangle(image,(int(float(boxes[i][0])),
#                                      int(float(boxes[i][1]))),
#                                     (int(float(boxes[i][2])),
#                                      int(float(boxes[i][3]))), (255,0,0), 2)
#
#     image = Image.fromarray(np.uint8(image))
#     image.show()

# Check the output feature maps
parser = Parser(416, 416, anchors, num_classes, debug=False)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=100)

image, y_true_13, y_true_26, y_true_52 = sess.run(trainset.get_next())
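A quick sanity check (ours) is to print the target shapes; the commented values assume the standard YOLOv3 encoding with 3 anchors per scale on a 416x416 input:

# y_true_13: (BATCH_SIZE, 13, 13, 3, 5 + num_classes)  <- assumed layout
# y_true_26: (BATCH_SIZE, 26, 26, 3, 5 + num_classes)
# y_true_52: (BATCH_SIZE, 52, 52, 3, 5 + num_classes)
for name, arr in zip(("y_true_13", "y_true_26", "y_true_52"),
                     (y_true_13, y_true_26, y_true_52)):
    print(name, arr.shape)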
Example #11
import tensorflow as tf
from core import utils, yolov3
from core.dataset import dataset, Parser
from basicNet.mobilenetV2 import MobilenetV2
from config.config import *
parser = Parser(ANCHORS, NUM_CLASSES)
trainset = dataset(parser,
                   TEST_TFRECORD,
                   BATCH_SIZE,
                   shuffle=SHUFFLE_SIZE,
                   multi_image_size=False)
testset = dataset(parser, TEST_TFRECORD, BATCH_SIZE, shuffle=None)
example = trainset.get_next()

images, *y_true = example
model = yolov3.yolov3(NUM_CLASSES, ANCHORS, basic_net=MobilenetV2)

with tf.variable_scope('yolov3'):
    model.set_anchor(images)
    pred_feature_map = model.forward(images, is_training=False)
    y_pred = model.predict(pred_feature_map)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "./checkpoint/yolov3.ckpt-25000")
    run_items = sess.run([images, y_pred])
    for i in range(8):
        image = run_items[0][i]
        pred_boxes = run_items[1][0][i:i + 1]
        pred_confs = run_items[1][1][i:i + 1]
        pred_probs = run_items[1][2][i:i + 1]
        pred_boxes, pred_scores, pred_labels = utils.cpu_nms(
            pred_boxes, pred_confs * pred_probs, NUM_CLASSES,
            score_thresh=0.3, iou_thresh=0.5)  # thresholds and confs*probs scoring are assumptions
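        # Follow-up sketch (ours, not from the project): draw the kept boxes
        # with the same cv2 pattern the earlier examples use. Assumes cv2,
        # numpy and PIL are imported and images come out scaled to [0, 1].
        for box in pred_boxes:
            x0, y0, x1, y1 = (int(float(v)) for v in box)
            image = cv2.rectangle(image, (x0, y0), (x1, y1), (255, 0, 0), 2)
        Image.fromarray(np.uint8(image * 255)).show()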