Example #1
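Real-time webcam demo: reads frames from the default camera, runs the saved face-detection model on each one, and displays the drawn detections until 'q' is pressed.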
def main(_argv):
    """
    """
    runtime_device = FLAGS.device.lower()
    if runtime_device == 'gpu':
        devices = tf.config.experimental.list_physical_devices('GPU')
        for device in devices:
            tf.config.experimental.set_memory_growth(device, True)
    else:
        devices = tf.config.experimental.list_physical_devices(device_type='CPU')
        tf.config.experimental.set_visible_devices(devices=devices,
                                                   device_type='CPU')

    priors = prior_boxes.PriorBox(cfg).forward()
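    # Load the trained detector from the project's SavedModel directory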
    model = tf.keras.models.load_model('checkpoints/saved_models')

    cap = cv2.VideoCapture(0)
    while True:
        ret, image = cap.read()
        if not ret:
            continue

        start = time.time()
        dets = detect(image, model, priors)
        end = time.time()
        print('Detect time: {:.2f}s ({} FPS)'.format(
            end - start, int(1 / (end - start))))

        drawn_image = draw_bboxes(image, dets)
        cv2.imshow('face detection', drawn_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #2
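Standalone test script: builds the prior boxes, encodes a set of ground-truth face boxes into per-prior training targets with compute_targets, and constructs dummy predictions for exercising SSDLosses.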
import context

import tensorflow as tf
from utils.box_utils import compute_targets
from config import cfg
from dataset import prior_boxes
from losses import SSDLosses

loss_obj = SSDLosses(cfg['MODEL']['NEG_RATIO'], cfg['MODEL']['NUM_CLASSES'])
priors = prior_boxes.PriorBox(cfg).forward()

bboxes = tf.constant([[0.10546875, 0.59183675, 0.11914062, 0.6186896],
                      [0.9003906, 0.5961332, 0.91503906, 0.6165413],
                      [0.9238281, 0.6100967, 0.93847656, 0.632653],
                      [0.9814453, 0.5950591, 0.99609375, 0.6186896],
                      [0.8095703, 0.62406015, 0.82421875, 0.64661646],
                      [0.87890625, 0.6262083, 0.890625, 0.6444683],
                      [0.96191406, 0.71213746, 0.9765625, 0.7346939]],
                     dtype=tf.float32)
labels = tf.constant([1, 1, 1, 1, 1, 1, 1], tf.int64)

# import pdb
# pdb.set_trace()
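# compute_targets matches each prior box against the ground-truth boxes and
# labels, producing per-prior classification targets and regression offsets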
gt_confs, gt_locs = compute_targets(priors, bboxes, labels)
gt_confs = tf.expand_dims(gt_confs, 0)
gt_locs = tf.expand_dims(gt_locs, 0)
confs = tf.constant([[[0.1, 0.9], [0.7, 0.5]]], tf.float32)
confs = tf.pad(confs, [[0, 0], [0, 5873], [0, 0]])
locs = tf.constant([[[0.1, 0.1, 0.15, 0.15], [0.12, 0.12, 0.15, 0.15]]],
                   dtype=tf.float32)
locs = tf.pad(locs, [[0, 0], [0, 5873], [0, 0]])
# The predictions (confs, locs) and targets (gt_confs, gt_locs) would then be
# fed to loss_obj; the exact call signature depends on the SSDLosses class.
Example #3
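Iteration-driven training script for COCO-style data: loads a YAML config, builds the model and optimizer, restores the latest checkpoint if present, and writes train/validation losses to TensorBoard while saving a checkpoint every 500 steps.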
def main(_argv):
    devices = tf.config.experimental.list_physical_devices('GPU')
    for device in devices:
        tf.config.experimental.set_memory_growth(device, True)

    # Uncomment for debugging only
    # tf.config.run_functions_eagerly(True)
    # tf.debugging.enable_check_numerics()

    # Load config
    if not os.path.exists(FLAGS.config_file):
        logging.error('Config file not found')
        return
    with open(FLAGS.config_file) as f:
        cfg = yaml.load(f, yaml.FullLoader)

    # Load data
    logging.info('Loading train data')
    priors = prior_boxes.PriorBox(cfg).forward()
    anno_dir = os.path.join(FLAGS.data_dir, 'annotations')
    label_map_file = os.path.join(FLAGS.data_dir, 'coco.names')

    train_image_dir = os.path.join(FLAGS.data_dir, 'train2017')
    train_anno_path = os.path.join(anno_dir, 'instances_train2017.json')
    train_loader = DataLoader(priors,
                              batch_size=FLAGS.batch_size,
                              num_workers=FLAGS.num_workers,
                              image_size=cfg['INPUT']['IMAGE_SIZE'],
                              training=True)
    train_data, num_train = train_loader.load(train_image_dir, label_map_file,
                                              train_anno_path)

    logging.info('Loading val data')
    val_image_dir = os.path.join(FLAGS.data_dir, 'val2017')
    val_anno_path = os.path.join(anno_dir, 'instances_val2017.json')
    val_loader = DataLoader(priors,
                            batch_size=FLAGS.batch_size,
                            num_workers=FLAGS.num_workers,
                            image_size=cfg['INPUT']['IMAGE_SIZE'],
                            training=False)
    val_data = val_loader.load(val_image_dir, label_map_file, val_anno_path)

    # Create the model + optimizer
    model = model_factory.create_model(cfg)
    model_name = cfg['MODEL']['NAME']
    logging.info(f'Created model {model_name}')
    # model.summary()
    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.lr)

    # Create a checkpoint for smooth training
    ckpt_prefix = os.path.join(FLAGS.checkpoint_prefix, model_name.lower())
    ckpt = tf.train.Checkpoint(step=tf.Variable(0),
                               optimizer=optimizer,
                               model=model,
                               train_data=train_data)
    manager = tf.train.CheckpointManager(ckpt, ckpt_prefix, max_to_keep=1)

    # Restore variables if a checkpoint exists
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        logging.info('Restoring from {}'.format(manager.latest_checkpoint))
    else:
        logging.info('Training the model from scratch')

    # Criterion
    loss_obj = SSDLosses(FLAGS.neg_ratio, FLAGS.num_classes)

    # Loss aggregation
    train_loss = tf.keras.metrics.Mean(name='loss')
    train_conf_loss = tf.keras.metrics.Mean(name='conf_loss')
    train_loc_loss = tf.keras.metrics.Mean(name='loc_loss')

    val_loss = tf.keras.metrics.Mean(name='val_loss')
    val_conf_loss = tf.keras.metrics.Mean(name='val_conf_loss')
    val_loc_loss = tf.keras.metrics.Mean(name='val_loc_loss')

    # Tensorboard
    if os.path.exists('logs'):
        shutil.rmtree('logs')

    train_log_dir = 'logs/train'
    train_writer = tf.summary.create_file_writer(train_log_dir)

    val_log_dir = 'logs/val'
    val_writer = tf.summary.create_file_writer(val_log_dir)
    # steps_per_epoch = num_train // FLAGS.batch_size
    # start = time.time()
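    # Training loop: iterate once over train_data, logging every 10 steps,
    # checkpointing every 500 steps, and running validation every 100 steps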
    for batch, (images, gt_confs, gt_locs) in enumerate(train_data):
        loss, conf_loss, loc_loss, l2_loss = train_step(
            images, gt_confs, gt_locs, model, loss_obj, optimizer)

        ckpt.step.assign_add(1)
        step = int(ckpt.step)
        if step % 500 == 0:
            save_path = manager.save()
            logging.info('Saved checkpoint for step {}: {}'.format(
                int(ckpt.step), save_path))

        if step % 10 == 0:
            logging.info('iter {:06d}/{:06d} | '
                         'total_loss: {:.2f} conf_loss: {:.2f} '
                         'loc_loss: {:.2f}'.format(step, FLAGS.max_iter,
                                                   loss.numpy(),
                                                   conf_loss.numpy(),
                                                   loc_loss.numpy()))

        train_loss.update_state(loss)
        train_conf_loss.update_state(conf_loss)
        train_loc_loss.update_state(loc_loss)

        if step % 100 == 0:
            with train_writer.as_default():
                tf.summary.scalar('loss', train_loss.result(), step=step)
                tf.summary.scalar('conf_loss',
                                  train_conf_loss.result(),
                                  step=step)
                tf.summary.scalar('loc_loss',
                                  train_loc_loss.result(),
                                  step=step)

            for val_images, val_gt_confs, val_gt_locs in val_data:
                loss, conf_loss, loc_loss, _ = test_step(
                    val_images, val_gt_confs, val_gt_locs, model, loss_obj)
                val_loss.update_state(loss)
                val_conf_loss.update_state(conf_loss)
                val_loc_loss.update_state(loc_loss)
            logging.info('Evaluation | total_loss: {:.2f} '
                         'conf_loss: {:.2f} loc_loss: {:.2f}'.format(
                             val_loss.result(), val_conf_loss.result(),
                             val_loc_loss.result()))

            with val_writer.as_default():
                tf.summary.scalar('loss', val_loss.result(), step=step)
                tf.summary.scalar('conf_loss',
                                  val_conf_loss.result(),
                                  step=step)
                tf.summary.scalar('loc_loss', val_loc_loss.result(), step=step)

            train_loss.reset_states()
            train_conf_loss.reset_states()
            train_loc_loss.reset_states()

            val_loss.reset_states()
            val_conf_loss.reset_states()
            val_loc_loss.reset_states()

    model.save_weights(ckpt_prefix)
Example #4
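Epoch-driven training script for the face-detection model: restores from a checkpoint, trains and evaluates every epoch, logs TensorBoard summaries, and exports the best model (lowest validation loss) in SavedModel format.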
def main(_argv):
    devices = tf.config.experimental.list_physical_devices('GPU')
    for device in devices:
        tf.config.experimental.set_memory_growth(device, True)

    # tf.config.run_functions_eagerly(True)

    image_size = cfg['INPUT']['IMAGE_SIZE']
    model_name = cfg['MODEL']['NAME']
    boxes_per_location = cfg['MODEL']['PRIORS']['BOXES_PER_LOCATION']
    num_classes = cfg['MODEL']['NUM_CLASSES']
    neg_ratio = cfg['MODEL']['NEG_RATIO']

    # Build the model
    input_shape = (image_size, image_size, 3)
    model = face_detection.FaceDetection(input_shape,
                                         num_classes,
                                         boxes_per_location,
                                         training=True)
    logging.info(f'Built model {model_name}')

    # Optimizer and loss object
    loss_obj = SSDLosses(neg_ratio, num_classes)
    optimizer = tf.keras.optimizers.Adam(FLAGS.lr)
    # optimizer = tf.keras.optimizers.SGD(FLAGS.lr)

    # Checkpoint and checkpoint manager
    ckpt = tf.train.Checkpoint(step=tf.Variable(0),
                               model=model,
                               best_loss=tf.Variable(1e6),
                               optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              FLAGS.checkpoint_prefix,
                                              max_to_keep=3)
    ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()
    if ckpt_manager.latest_checkpoint:
        logging.info('Restored model from {}'.format(
            ckpt_manager.latest_checkpoint))
    else:
        logging.info('Training the model from scratch')

    # Load data
    priors = prior_boxes.PriorBox(cfg).forward()
    train_loader = DataLoader(priors,
                              batch_size=FLAGS.batch_size,
                              num_workers=FLAGS.num_workers,
                              image_size=image_size,
                              training=True)
    train_data = train_loader.load(FLAGS.train_file)
    logging.info(f'Loaded train data from {FLAGS.train_file}')

    val_loader = DataLoader(priors,
                            batch_size=FLAGS.batch_size,
                            num_workers=FLAGS.num_workers,
                            image_size=image_size,
                            training=False)
    val_data = val_loader.load(FLAGS.val_file)
    logging.info(f'Loaded validation data from {FLAGS.val_file}')

    # Loss aggregation
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_conf_loss = tf.keras.metrics.Mean(name='train_conf_loss')
    train_loc_loss = tf.keras.metrics.Mean(name='train_loc_loss')

    val_loss = tf.keras.metrics.Mean(name='val_loss')
    val_conf_loss = tf.keras.metrics.Mean(name='val_conf_loss')
    val_loc_loss = tf.keras.metrics.Mean(name='val_loc_loss')

    # Tensorboard summaries
    train_log_dir = 'logs/train'
    val_log_dir = 'logs/val'
    train_writer = tf.summary.create_file_writer(train_log_dir)
    val_writer = tf.summary.create_file_writer(val_log_dir)

    # patience_cnt = 0
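    # Epoch loop: resume from the checkpointed epoch counter and train until FLAGS.epoch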
    for epoch in range(int(ckpt.step) + 1, FLAGS.epoch + 1):
        start = time.time()
        for batch, (images, gt_confs, gt_locs) in enumerate(train_data):
            loss, conf_loss, loc_loss, l2_loss = train_step(
                images, gt_confs, gt_locs, model, loss_obj, optimizer)
            train_loss.update_state(loss)
            train_conf_loss.update_state(conf_loss)
            train_loc_loss.update_state(loc_loss)

            if (batch + 1) % FLAGS.log_step == 0:
                logging.info('Epoch {} iter {} | conf_loss: {:.2f} '
                             ' loc_loss: {:.2f} l2_loss: {:.2f} '
                             ' loss: {:.2f}'.format(epoch, batch + 1,
                                                    conf_loss.numpy(),
                                                    loc_loss.numpy(),
                                                    l2_loss.numpy(),
                                                    loss.numpy()))
        end = time.time()
        logging.info('Epoch time: {:.2f}s'.format(end - start))

        # Training summaries
        with train_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=epoch)
            tf.summary.scalar('conf_loss',
                              train_conf_loss.result(),
                              step=epoch)
            tf.summary.scalar('loc_loss', train_loc_loss.result(), step=epoch)

        train_loss.reset_states()
        train_conf_loss.reset_states()
        train_loc_loss.reset_states()

        # if epoch % FLAGS.eval_step == 0:
        for batch, (images, gt_confs, gt_locs) in enumerate(val_data):
            loss, conf_loss, loc_loss, _ = test_step(images, gt_confs, gt_locs,
                                                     model, loss_obj)
            val_loss.update_state(loss)
            val_conf_loss.update_state(conf_loss)
            val_loc_loss.update_state(loc_loss)

        logging.info('Evaluation | conf_loss: {:.2f} loc_loss {:.2f} '
                     'loss: {:.2f} best_loss: {:.2f}'.format(
                         val_conf_loss.result(), val_loc_loss.result(),
                         val_loss.result(), float(ckpt.best_loss)))

        with val_writer.as_default():
            tf.summary.scalar('loss', val_loss.result(), step=epoch)
            tf.summary.scalar('conf_loss', val_conf_loss.result(), step=epoch)
            tf.summary.scalar('loc_loss', val_loc_loss.result(), step=epoch)

        # Save checkpoint
        save_path = ckpt_manager.save()
        logging.info(f'Saved checkpoint as {save_path}')
        if val_loss.result() <= float(ckpt.best_loss):
            # Save best model in SavedModel format
            model.save(FLAGS.saved_models)
            ckpt.best_loss.assign(val_loss.result())
            logging.info(f'Saved best model in {FLAGS.saved_models}')

        val_loss.reset_states()
        val_conf_loss.reset_states()
        val_loc_loss.reset_states()

        # Increase epoch counter
        ckpt.step.assign_add(1)
Example #5
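Single-image inference script: loads the exported SavedModel, runs it on a resized input image, decodes and rescales the predicted boxes, filters low-confidence detections, applies NMS, and draws the results.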
def main(_argv):
    """
    """
    devices = tf.config.experimental.list_physical_devices('GPU')
    for device in devices:
        tf.config.experimental.set_memory_growth(device, True)

    # # Load the model
    # image_size = cfg['INPUT']['IMAGE_SIZE']
    # model_name = cfg['MODEL']['NAME']
    # boxes_per_location = cfg['MODEL']['PRIORS']['BOXES_PER_LOCATION']
    # num_classes = cfg['MODEL']['NUM_CLASSES']
    # neg_ratio = cfg['MODEL']['NEG_RATIO']
    # num_classes = cfg['MODEL']['NUM_CLASSES']
    # input_shape = (image_size, image_size, 3)
    # model = face_detection.FaceDetection(input_shape, num_classes,
    #                                      boxes_per_location, training=False)
    # model_name = cfg['MODEL']['NAME']
    # logging.info(f'Loaded model {model_name}')
    #
    # optimizer = tf.keras.optimizers.Adam(lr=1e-3)
    #
    # # Create a checkpoint for smooth training
    # ckpt = tf.train.Checkpoint(
    #     step=tf.Variable(0), optimizer=optimizer, model=model)
    # manager = tf.train.CheckpointManager(ckpt, FLAGS.model, max_to_keep=1)
    #
    # # Restore variables if a checkpoint exists
    # ckpt.restore(manager.latest_checkpoint)
    # if manager.latest_checkpoint:
    #     logging.info('Restoring from {}'.format(manager.latest_checkpoint))
    # else:
    #     logging.info('Train the model from scratch')
    model = tf.keras.models.load_model('checkpoints/saved_models')

    image_path = FLAGS.image
    image_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
    h, w = image_raw.shape[:2]
    image = cv2.resize(image_raw, (320, 320))
    image = image.astype('float32') / 255.
    images = np.expand_dims(image, axis=0)

    priors = prior_boxes.PriorBox(cfg).forward()
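    # Run the model, decode the predicted offsets into normalized boxes, and
    # scale them back to the original image size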
    confs, locs = model(images, training=False)
    boxes = decode(priors, tf.squeeze(locs, 0))
    boxes = boxes.numpy()
    scale = np.array([w, h, w, h])
    boxes = boxes * scale

    confs = tf.squeeze(confs, 0)
    scores = confs.numpy()
    scores = scores[:, 1]

    # Ignore low scores
    inds = np.where(scores > FLAGS.conf_threshold)[0]
    boxes = boxes[inds]
    scores = scores[inds]

    # Keep top-k before NMS
    order = scores.argsort()[::-1][:FLAGS.top_k]
    boxes = boxes[order]
    scores = scores[order]

    # NMS
    dets = np.hstack(
        (boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, FLAGS.nms_threshold)
    dets = dets[keep, :]

    dets = dets[:FLAGS.keep_top_k, :]
    for b in dets:
        score = float(b[4])
        text = 'score: {:.2f}'.format(score)
        b = list(map(int, b[:4]))
        cv2.rectangle(image_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 1)
        cx = b[0]
        cy = b[1] - 12
        cv2.putText(image_raw, text, (cx, cy),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (12, 215, 255))
    cv2.imshow('img', image_raw)
    cv2.waitKey(0)