def main(argv=None):
    assert FLAGS.base_dir, 'base_dir not set.'
    assert os.path.exists(FLAGS.base_dir), \
      'Directory %s does not exist.' % FLAGS.base_dir
    os.makedirs(FLAGS.output_dir, exist_ok=True)

    files = os.listdir(FLAGS.base_dir)
    json_files = filter(lambda f: f.endswith('.json'), files)
    json_file_paths = list(
        map(lambda f: os.path.join(FLAGS.base_dir, f), json_files))

    # Placeholder feature-map variable: it is never evaluated here; only its shape
    # is needed so the SSD layers (and thus the default-box layout) can be built.
    feature_map = tf.get_variable(shape=FEATURE_MAP_SHAPE, name='feature_map')
    ssd_layers = create_ssd_layers(feature_map)

    file_count = len(json_file_paths)

    total_face_count = 0
    total_match_count = 0

    for index, json_path in enumerate(json_file_paths):
        for rotate in range(4):
            annotation = _load_annotation(json_path, rotate=rotate)
            image_file_name = annotation['file_name']

            regions = annotation['regions']
            face_regions = list(
                filter(lambda region: region['label'] == 0, regions))

            boxes = create_boxes(ssd_layers)

            face_count, match_count = _assign_box(face_regions, boxes)
            total_face_count += face_count
            total_match_count += match_count

            image_path = os.path.join(FLAGS.base_dir, image_file_name)
            image = _create_image_data(image_path, rotate=rotate)

            name, _ = os.path.splitext(image_file_name)
            tfrecord_name = '%s_%d.tfrecord' % (name, rotate)
            tfrecord_path = os.path.join(FLAGS.output_dir, tfrecord_name)

            _create_tfrecord(image, image_file_name, boxes, tfrecord_path)

            if FLAGS.debug:
                debug_image_file_name = '%s_%d.jpg' % (name, rotate)
                debug_image_file_path = os.path.join(FLAGS.output_dir,
                                                     debug_image_file_name)
                _create_debug_image(image, boxes, debug_image_file_path)

        # Guard against division by zero when no face regions have been seen yet.
        percentage = total_match_count / float(total_face_count) if total_face_count else 0.0
        print('%d/%d, %d/%d - %f' % (index + 1, file_count, total_match_count,
                                     total_face_count, percentage))
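# _assign_box is called in main() above but is not shown in this listing. The sketch
# below is a minimal stand-in, assuming SSD-style matching by IoU: the region dicts
# are assumed to carry pixel coordinates under 'left'/'top'/'right'/'bottom'
# (hypothetical key names), and the 0.5 threshold is an assumption, not taken from
# the original code.
def _iou(region, box):
    """Intersection-over-union between an annotation region and a default box."""
    left = max(region['left'], box.left)
    top = max(region['top'], box.top)
    right = min(region['right'], box.right)
    bottom = min(region['bottom'], box.bottom)
    if right <= left or bottom <= top:
        return 0.0
    intersection = (right - left) * (bottom - top)
    region_area = (region['right'] - region['left']) * (region['bottom'] - region['top'])
    box_area = (box.right - box.left) * (box.bottom - box.top)
    return intersection / float(region_area + box_area - intersection)


def _assign_box(face_regions, boxes, iou_threshold=0.5):
    """Match annotated faces to default boxes; return (face_count, match_count)."""
    face_count = len(face_regions)
    match_count = 0
    for region in face_regions:
        matched = False
        for box in boxes:
            if _iou(region, box) >= iou_threshold:
                # The real helper presumably also writes the class label and the
                # location offset onto the matched box so _create_tfrecord can
                # serialize them.
                matched = True
        if matched:
            match_count += 1
    return face_count, match_count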
def _export_boxes_position(feature_map, output_dir):
  box_layers = create_ssd_layers(feature_map)
  boxes = create_boxes(box_layers)

  box_position = []
  for index, box in enumerate(boxes):
    box_position.append({
      'index': index,
      'left': box.left,
      'top': box.top,
      'right': box.right,
      'bottom': box.bottom,
    })

  file_name = '%s_boxes_position.json' % model.NAME
  box_position_path = os.path.join(output_dir, file_name)
  with open(box_position_path, mode='w') as fp:
    json.dump(box_position, fp, indent=4)
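# Hypothetical usage sketch (not part of the original listing): build the dummy
# feature-map variable as in main() above and export the default-box geometry
# alongside the tfrecords.
feature_map = tf.get_variable(shape=FEATURE_MAP_SHAPE, name='feature_map')
_export_boxes_position(feature_map, FLAGS.output_dir)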
Example 3
def main(argv=None):
  assert FLAGS.train_path, 'train_path not set'

  assert FLAGS.image_path, 'image_path not set'
  assert os.path.exists(FLAGS.image_path), '%s does not exist' % FLAGS.image_path
  print(FLAGS.image_path)

  os.makedirs(FLAGS.output_dir, exist_ok=True)

  raw_image, resized_image = _get_image_data(FLAGS.image_path, model.IMAGE_SIZE)

  image = np.array(resized_image.getdata()) \
    .reshape(model.IMAGE_SIZE, model.IMAGE_SIZE, model.CHANNELS) \
    .astype(np.float32)

  file_name = os.path.basename(FLAGS.image_path)
  name, _ = os.path.splitext(file_name)

  output_image_file_name = '%s.jpg' % name
  output_image_path = os.path.join(FLAGS.output_dir, output_image_file_name)

  output_result_image_file_name = '%s_result.jpg' % name
  output_result_image_path = os.path.join(FLAGS.output_dir,
                                          output_result_image_file_name)

  global_step = tf.Variable(0, trainable=False)

  image_ph = tf.placeholder(tf.float32,
                            [model.IMAGE_SIZE, model.IMAGE_SIZE, model.CHANNELS])
  normalized_image = image_ph * (1 / 255.0)

  image_batch = tf.expand_dims(normalized_image, axis=0)

  feature_map = model.base_layers(image_batch, is_train=False)
  ssd_logits = model.ssd_layers(feature_map, is_train=False)

  # The first four channels per box are location offsets; the rest are class confidences.
  loc_logits = ssd_logits[:, :, :4]
  conf_logits = ssd_logits[:, :, 4:]

  loc_logits = tf.nn.tanh(loc_logits)
  conf_logits = tf.nn.sigmoid(conf_logits)

  ssd_logits = tf.concat([loc_logits, conf_logits], axis=2)

  saver = tf.train.Saver(var_list=tf.global_variables(),
                         max_to_keep=3)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, FLAGS.train_path)

    start = time.time()

    ssd_logits_values = sess.run(ssd_logits,
                                 feed_dict={
                                   image_ph: image
                                 })

    elapsed = time.time() - start
    print('Inference Elapsed %f s' % elapsed)

    start = time.time()

    ssd_layers = create_ssd_layers(feature_map)
    boxes = create_boxes(ssd_layers)

    for index, box_value in enumerate(ssd_logits_values[0]):
      boxes[index].label = box_value[4:]
      boxes[index].offset = box_value[:4]

    filtered_boxes = filter(
      lambda box: box.label[0] > FLAGS.confidence_threshold,
      boxes)

    faces = list(map(lambda box: Face(box), filtered_boxes))
    _merge_faces(faces, overlap_threshold=0.3)

    elapsed = time.time() - start
    print('Post-process Elapsed %f s' % elapsed)

    result_image = _draw_recognized_faces(raw_image.copy(), faces)

    result_image.save(output_result_image_path, format='jpeg')
    raw_image.save(output_image_path, format='jpeg')
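# _merge_faces is used above but not included in this listing. A minimal sketch of a
# greedy in-place merge follows, assuming each Face exposes left/top/right/bottom in
# pixel coordinates (an assumption); the real helper may also combine confidences.
def _overlap(a, b):
  """Intersection-over-union of two face rectangles."""
  left, top = max(a.left, b.left), max(a.top, b.top)
  right, bottom = min(a.right, b.right), min(a.bottom, b.bottom)
  if right <= left or bottom <= top:
    return 0.0
  intersection = (right - left) * (bottom - top)
  union = ((a.right - a.left) * (a.bottom - a.top) +
           (b.right - b.left) * (b.bottom - b.top) - intersection)
  return intersection / union


def _merge_faces(faces, overlap_threshold=0.3):
  """Greedily merge faces whose rectangles overlap beyond the threshold.

  Modifies the list in place: the surviving face grows to the union rectangle
  and the absorbed face is removed.
  """
  i = 0
  while i < len(faces):
    j = i + 1
    while j < len(faces):
      if _overlap(faces[i], faces[j]) > overlap_threshold:
        faces[i].left = min(faces[i].left, faces[j].left)
        faces[i].top = min(faces[i].top, faces[j].top)
        faces[i].right = max(faces[i].right, faces[j].right)
        faces[i].bottom = max(faces[i].bottom, faces[j].bottom)
        del faces[j]
      else:
        j += 1
    i += 1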