Example #1
  def __init__(self, data_format='channels_last'):
    """Constructor function."""

    # class-independent initialization
    super(ModelHelper, self).__init__(data_format, forward_w_labels=True)

    # initialize training & evaluation subsets
    self.dataset_train = PascalVocDataset(preprocess_fn=preprocess_image, is_train=True)
    self.dataset_eval = PascalVocDataset(preprocess_fn=preprocess_image, is_train=False)

    # setup hyper-parameters
    self.batch_size = None  # track the most recently-used one
    self.model_scope = "model"
Example #2
    def __init__(self):
        """Constructor function."""

        # class-independent initialization
        super(ModelHelper, self).__init__()

        # initialize training & evaluation subsets
        self.dataset_train = PascalVocDataset(is_train=True)
        self.dataset_eval = PascalVocDataset(is_train=False)

        # setup hyper-parameters & anchor information
        self.anchor_info = None  # track the most recently-used one
        self.batch_size = None  # track the most recently-used one
        self.model_scope = None
Example #3
import time

import tensorflow as tf
from tqdm import trange

# Param, PascalVocDataset and PeleeNetSSD are project-local modules assumed to be
# importable from the surrounding project; their import lines are not shown here.


def train():
    # sess
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # data
    dataset = PascalVocDataset(True, Param.image_size).build(Param.batch_size)
    img_info, image, groundtruth = dataset.get_next()

    # model
    model = PeleeNetSSD()
    pred_locations, pred_classes = model.forward(image, is_train=True)
    loss = model.calc_loss(pred_locations, pred_classes, groundtruth,
                           tf.trainable_variables())

    # optimizer
    global_step = tf.train.get_or_create_global_step()
    lrn_rate = tf.train.piecewise_constant(global_step, Param.lr_step,
                                           Param.lr_val)
    optimizer = tf.train.AdamOptimizer(lrn_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss, global_step)

    # prepare
    logdir = 'log/' + str(time.time()) + '/'
    sm_writer = tf.summary.FileWriter(logdir, sess.graph)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('lrn_rate', lrn_rate)
    summary_op = tf.summary.merge_all()
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()

    # run
    sess.run(init_op)
    warm_start()  # assumed to be defined elsewhere in the original file (restores backbone weights)
    for index in trange(Param.max_step):
        sess.run(train_op)
        if (index + 1) % Param.summary_step == 0:
            summary = sess.run(summary_op)
            sm_writer.add_summary(summary, index)
        if (index + 1) % Param.save_step == 0:
            saver.save(sess, 'checkpoint/model.ckpt', global_step)
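
The train() example above references a Param configuration object that is not part of the listing. A minimal stand-in covering only the fields the script touches might look like this (hypothetical placeholder values, not the original settings):

class Param:
    """Hypothetical hyper-parameter container for the train() example above."""
    image_size = 304                # input resolution passed to PascalVocDataset
    batch_size = 32                 # mini-batch size used in dataset.build()
    lr_step = [40000, 60000]        # global-step boundaries for piecewise_constant
    lr_val = [1e-3, 1e-4, 1e-5]     # learning-rate values (one more than lr_step)
    max_step = 80000                # total number of training iterations
    summary_step = 100              # write a TensorBoard summary every N steps
    save_step = 5000                # save a checkpoint every N steps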
Example #4

class ModelHelper(AbstractModelHelper):
    """Model helper for creating a VGG model for the VOC dataset."""
    def __init__(self):
        """Constructor function."""

        # class-independent initialization
        super(ModelHelper, self).__init__()

        # initialize training & evaluation subsets
        self.dataset_train = PascalVocDataset(is_train=True)
        self.dataset_eval = PascalVocDataset(is_train=False)

        # setup hyper-parameters & anchor information
        self.anchor_info = None  # track the most recently-used one
        self.batch_size = None  # track the most recently-used one
        self.model_scope = None

    def build_dataset_train(self, enbl_trn_val_split=False):
        """Build the data subset for training, usually with data augmentation."""

        return self.dataset_train.build()

    def build_dataset_eval(self):
        """Build the data subset for evaluation, usually without data augmentation."""

        return self.dataset_eval.build()

    def forward_train(self, inputs, data_format='channels_last'):
        """Forward computation at training."""

        anchor_info = setup_anchor_info()
        outputs, self.model_scope = forward_fn(inputs, True, data_format,
                                               anchor_info)
        self.anchor_info = anchor_info
        self.batch_size = tf.shape(inputs['image'])[0]
        self.trainable_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.model_scope)

        return outputs

    def forward_eval(self, inputs, data_format='channels_last'):
        """Forward computation at evaluation."""

        anchor_info = setup_anchor_info()
        outputs, __ = forward_fn(inputs, False, data_format, anchor_info)
        self.anchor_info = anchor_info
        self.batch_size = tf.shape(inputs['image'])[0]

        return outputs

    def calc_loss(self, objects, outputs, trainable_vars):
        """Calculate loss (and some extra evaluation metrics)."""

        return calc_loss_fn(objects, outputs, trainable_vars, self.anchor_info,
                            self.batch_size)

    def setup_lrn_rate(self, global_step):
        """Setup the learning rate (and number of training iterations)."""

        bnds = [int(x) for x in parse_comma_list(FLAGS.lrn_rate_dcy_bnds)]
        vals = [
            FLAGS.lrn_rate_init * x
            for x in parse_comma_list(FLAGS.lrn_rate_dcy_rates)
        ]
        lrn_rate = tf.train.piecewise_constant(global_step, bnds, vals)
        lrn_rate = tf.maximum(
            lrn_rate, tf.constant(FLAGS.lrn_rate_min, dtype=lrn_rate.dtype))
        nb_iters = FLAGS.nb_iters_train

        return lrn_rate, nb_iters

    def warm_start(self, sess):
        """Initialize the model for warm-start.

        Description:
        * We use a pre-trained ImageNet classification model to initialize the backbone part of the SSD
          model for feature extraction. If the SSD model's checkpoint files already exist, then the
          learner should restore model weights by itself.
        """

        # obtain a list of scopes to be excluded from initialization
        excl_scopes = []
        if FLAGS.warm_start_excl_scopes:
            excl_scopes = [
                scope.strip()
                for scope in FLAGS.warm_start_excl_scopes.split(',')
            ]
        tf.logging.info('excluded scopes: {}'.format(excl_scopes))

        # obtain a list of variables to be initialized
        vars_list = []
        for var in self.trainable_vars:
            excluded = False
            for scope in excl_scopes:
                if scope in var.name:
                    excluded = True
                    break
            if not excluded:
                vars_list.append(var)

        # rename the variables' scope
        if FLAGS.backbone_model_scope is not None:
            backbone_model_scope = FLAGS.backbone_model_scope.strip()
            if backbone_model_scope == '':
                vars_list = {
                    var.op.name.replace(self.model_scope + '/', ''): var
                    for var in vars_list
                }
            else:
                vars_list = {
                    var.op.name.replace(self.model_scope,
                                        backbone_model_scope): var
                    for var in vars_list
                }

        # re-map the variables' names
        name_remap = {'/kernel': '/weights', '/bias': '/biases'}
        vars_list_remap = {}
        for var_name, var in vars_list.items():
            for name_old, name_new in name_remap.items():
                if name_old in var_name:
                    var_name = var_name.replace(name_old, name_new)
                    break
            vars_list_remap[var_name] = var
        vars_list = vars_list_remap

        # display all the variables to be initialized
        for var_name, var in vars_list.items():
            tf.logging.info('using %s to initialize %s' %
                            (var_name, var.op.name))
        if not vars_list:
            raise ValueError('variables to be restored cannot be empty')

        # obtain the checkpoint files' path
        ckpt_path = tf.train.latest_checkpoint(FLAGS.backbone_ckpt_dir)
        tf.logging.info('restoring model weights from ' + ckpt_path)

        # remove missing variables from the list
        if FLAGS.ignore_missing_vars:
            reader = tf.train.NewCheckpointReader(ckpt_path)
            vars_list_avail = {}
            for var in vars_list:
                if reader.has_tensor(var):
                    vars_list_avail[var] = vars_list[var]
                else:
                    tf.logging.warning(
                        'variable %s not found in checkpoint files %s.' %
                        (var, ckpt_path))
            vars_list = vars_list_avail
        if not vars_list:
            tf.logging.warning('no variables to restore.')
            return

        # restore variables from checkpoint files
        saver = tf.train.Saver(vars_list, reshape=False)
        saver.build()
        saver.restore(sess, ckpt_path)

    def dump_n_eval(self, outputs, action):
        """Dump the model's outputs to files and evaluate."""

        if not is_primary_worker('global'):
            return

        if action == 'init':
            if os.path.exists(FLAGS.outputs_dump_dir):
                shutil.rmtree(FLAGS.outputs_dump_dir)
            os.mkdir(FLAGS.outputs_dump_dir)
        elif action == 'dump':
            filename = outputs['predictions']['filename'][0].decode(
                'utf8')[:-4]
            shape = outputs['predictions']['shape'][0]
            for idx_cls in range(1, FLAGS.nb_classes):
                with open(
                        os.path.join(FLAGS.outputs_dump_dir,
                                     'results_%d.txt' % idx_cls),
                        'a') as o_file:
                    scores = outputs['predictions']['scores_%d' % idx_cls][0]
                    bboxes = outputs['predictions']['bboxes_%d' % idx_cls][0]
                    bboxes[:, 0] = (bboxes[:, 0] * shape[0]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 1] = (bboxes[:, 1] * shape[1]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 2] = (bboxes[:, 2] * shape[0]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 3] = (bboxes[:, 3] * shape[1]).astype(
                        np.int32, copy=False) + 1
                    for idx_bbox in range(bboxes.shape[0]):
                        bbox = bboxes[idx_bbox][:]
                        if bbox[2] > bbox[0] and bbox[3] > bbox[1]:
                            o_file.write('%s %.3f %.1f %.1f %.1f %.1f\n' %
                                         (filename, scores[idx_bbox], bbox[1],
                                          bbox[0], bbox[3], bbox[2]))
        elif action == 'eval':
            do_python_eval(os.path.join(FLAGS.data_dir_local, 'VOC2007'),
                           FLAGS.outputs_dump_dir)
        else:
            raise ValueError('unrecognized action in dump_n_eval(): ' + action)

    @property
    def model_name(self):
        """Model's name."""

        return 'ssd_vgg_300'

    @property
    def dataset_name(self):
        """Dataset's name."""

        return 'pascalvoc'
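
setup_lrn_rate() above calls a parse_comma_list() helper that the listing does not include. Judging only from how it is used (a comma-separated flag string whose items are then converted to numbers), a plausible implementation would be:

def parse_comma_list(comma_sep_str):
    """Hypothetical helper: split a comma-separated flag string into floats."""
    return [float(x) for x in comma_sep_str.split(',') if x.strip()]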
Example #5

import sys

sys.path.append('./')

import tensorflow as tf
from datasets.pascalvoc_dataset import PascalVocDataset
import cv2
import numpy as np

sess = tf.Session()
dataset = PascalVocDataset(is_train=True, img_size=304)
dataiter = dataset.build(1)
img_info, image, groundtruth = dataiter.get_next()

for i in range(10):
    gt = sess.run([img_info, image, groundtruth])
    img = gt[1][0].astype(np.uint8)
    height, width, channel = img.shape
    # each ground-truth row is assumed to be [label, ymin, xmin, ymax, xmax] in
    # normalized coordinates (the same ordering dump_n_eval() relies on above)
    for line in gt[2][0]:
        if line[0]:
            cv2.rectangle(img, (int(width * line[2]), int(height * line[1])),
                          (int(width * line[4]), int(height * line[3])),
                          (0, 255, 0), 1)
    cv2.imshow('win', img)
    cv2.waitKey()
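
cv2.imshow() and cv2.waitKey() require a display. On a headless machine the same sanity check can write the drawn images to disk instead; a small, assumed variation of the two display lines inside the loop:

    # headless alternative to cv2.imshow('win', img) / cv2.waitKey()
    cv2.imwrite('gt_vis_%02d.png' % i, img)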
Example #6
class ModelHelper(AbstractModelHelper):
  """Model helper for creating a VGG model for the VOC dataset."""

  def __init__(self, data_format='channels_last'):
    """Constructor function."""

    # class-independent initialization
    super(ModelHelper, self).__init__(data_format, forward_w_labels=True)

    # initialize training & evaluation subsets
    self.dataset_train = PascalVocDataset(preprocess_fn=preprocess_image, is_train=True)
    self.dataset_eval = PascalVocDataset(preprocess_fn=preprocess_image, is_train=False)

    # setup hyper-parameters
    self.batch_size = None  # track the most recently-used one
    self.model_scope = "model"

  def build_dataset_train(self, enbl_trn_val_split=False):
    """Build the data subset for training, usually with data augmentation."""
    return self.dataset_train.build()

  def build_dataset_eval(self):
    """Build the data subset for evaluation, usually without data augmentation."""
    return self.dataset_eval.build()

  def forward_train(self, inputs, objects, data_format='channels_last'):
    """Forward computation at training."""
    inputs_dict = {'inputs': inputs, 'objects': objects}
    outputs = forward_fn(inputs_dict, True)
    self.vars = slim.get_model_variables()
    return outputs

  def forward_eval(self, inputs, data_format='channels_last'):
    """Forward computation at evaluation."""
    inputs_dict = {'inputs': inputs, 'objects': None}
    outputs = forward_fn(inputs_dict, False)
    return outputs

  def calc_loss(self, objects, outputs, trainable_vars):
    """Calculate loss (and some extra evaluation metrics)."""
    forward_dict = outputs['forward_dict']
    metrics = outputs['metrics']
    loss = tf.constant(0.0, dtype=tf.float32)
    if forward_dict:  # the loss is only built at training time
      loss = calc_loss_fn(objects, forward_dict, trainable_vars)
    return loss, metrics

  def setup_lrn_rate(self, global_step):
    """Setup the learning rate (and number of training iterations)."""

    lrn_rate = tf.train.piecewise_constant(global_step,
                                     boundaries=[np.int64(cfgs.DECAY_STEP[0]), np.int64(cfgs.DECAY_STEP[1])],
                                     values=[cfgs.LR, cfgs.LR / 10., cfgs.LR / 100.])
    nb_iters = FLAGS.nb_iters_train

    tf.summary.scalar('lrn_rate', lrn_rate)

    return lrn_rate, nb_iters

  def warm_start(self, sess):
    """Initialize the model for warm-start.

    Description:
    * We use a pre-trained ImageNet classification model to initialize the backbone part of the SSD
      model for feature extraction. If the SSD model's checkpoint files already exist, then skip.
    """
    # early return if checkpoint files already exist
    checkpoint_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.save_path))
    model_variables = self.vars
    if checkpoint_path is not None:
      if cfgs.RESTORE_FROM_RPN:
        print('___restore from rpn___')

        restore_variables = [var for var in model_variables if not var.name.startswith(self.model_scope + 'FastRCNN_Head')] + \
                            [slim.get_or_create_global_step()]
        for var in restore_variables:
          print(var.name)
        saver = tf.train.Saver()
        saver.build()
        saver.restore(sess, checkpoint_path)
      else:
        print("___restore from trained model___")
        for var in model_variables:
          print(var.name)
        saver = tf.train.Saver(model_variables)
        saver.build()
        saver.restore(sess, checkpoint_path)
      print("model restore from :", checkpoint_path)
    else:
      if cfgs.NET_NAME.startswith("resnet"):
        weights_name = cfgs.NET_NAME
      elif cfgs.NET_NAME.startswith("MobilenetV2"):
        weights_name = "mobilenet/mobilenet_v2_1.0_224"
      else:
        raise Exception('net name must be in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
      checkpoint_path = os.path.join(FLAGS.backbone_ckpt_dir, weights_name + '.ckpt')
      print("model restore from pretrained mode, path is :", checkpoint_path)
      # for var in model_variables:
      #     print(var.name)
      # print(20*"__++__++__")

      def name_in_ckpt_rpn(var):
        '''
        model/resnet_v1_50/block4 -> resnet_v1_50/block4
        model/MobilenetV2/**      -> MobilenetV2/**
        :param var:
        :return:
        '''
        return '/'.join(var.op.name.split('/')[1:])

      def name_in_ckpt_fastrcnn_head(var):
        '''
        model/Fast-RCNN/resnet_v1_50/block4 -> resnet_v1_50/block4
        model/Fast-RCNN/MobilenetV2/**      -> MobilenetV2/**
        :param var:
        :return:
        '''
        return '/'.join(var.op.name.split('/')[2:])
      nameInCkpt_Var_dict = {}
      for var in model_variables:
        if var.name.startswith(self.model_scope + '/Fast-RCNN/' + cfgs.NET_NAME):  # +'/block4'
          var_name_in_ckpt = name_in_ckpt_fastrcnn_head(var)
          nameInCkpt_Var_dict[var_name_in_ckpt] = var
        else:
          if var.name.startswith(self.model_scope + '/' + cfgs.NET_NAME):
            var_name_in_ckpt = name_in_ckpt_rpn(var)
            nameInCkpt_Var_dict[var_name_in_ckpt] = var
          else:
            continue
      restore_variables = nameInCkpt_Var_dict
      if not restore_variables:
        tf.logging.warning('no variables to restore.')
        return
      for key, item in restore_variables.items():
        print("var_in_graph: ", item.name)
        print("var_in_ckpt: ", key)
        print(20 * "___")
      # restore variables from checkpoint files
      saver = tf.train.Saver(restore_variables, reshape=False)
      saver.build()
      saver.restore(sess, checkpoint_path)
      print(20 * "****")
      print("restore from pretrained_weighs in IMAGE_NET")
    print('model restored')


  def dump_n_eval(self, outputs, action):
    """Dump the model's outputs to files and evaluate."""
    if not is_primary_worker('global'):
      return
    if action == 'init':
      if os.path.exists(FLAGS.outputs_dump_dir):
        shutil.rmtree(FLAGS.outputs_dump_dir)
      os.mkdir(FLAGS.outputs_dump_dir)

    elif action == 'dump':
      filename = outputs['predictions']['filename'][0].decode('utf8')[:-4]
      raw_shape = outputs['predictions']['shape'][0]
      resized_shape = outputs['predictions']['resized_shape']

      detected_boxes = outputs['predictions']['detected_boxes']
      detected_scores = outputs['predictions']['detected_scores']
      detected_categories = outputs['predictions']['detected_categories']


      raw_h, raw_w = raw_shape[0], raw_shape[1]
      resized_h, resized_w = resized_shape[1], resized_shape[2]

      xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
                               detected_boxes[:, 2], detected_boxes[:, 3]

      xmin = xmin * raw_w / resized_w
      xmax = xmax * raw_w / resized_w
      ymin = ymin * raw_h / resized_h
      ymax = ymax * raw_h / resized_h

      boxes = np.transpose(np.stack([xmin, ymin, xmax, ymax]))
      dets = np.hstack((detected_categories.reshape(-1, 1),
                        detected_scores.reshape(-1, 1),
                        boxes))

      for cls_id in range(1, FLAGS.nb_classes):
        with open(os.path.join(FLAGS.outputs_dump_dir, 'results_%d.txt' % cls_id), 'a') as o_file:
          this_cls_detections = dets[dets[:, 0] == cls_id]
          if this_cls_detections.shape[0] == 0:
            continue  # this cls has none detections in this img
          for a_det in this_cls_detections:
            o_file.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                    format(filename, a_det[1],
                           a_det[2], a_det[3],
                           a_det[4], a_det[5]))  # that is [img_name, score, xmin, ymin, xmax, ymax]

    elif action == 'eval':
      do_python_eval(os.path.join(self.dataset_eval.data_dir, 'test'), FLAGS.outputs_dump_dir)
    else:
      raise ValueError('unrecognized action in dump_n_eval(): ' + action)

  @property
  def model_name(self):
    """Model's name."""
    return cfgs.NET_NAME

  @property
  def dataset_name(self):
    """Dataset's name."""
    return 'pascalvoc'
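
Example #6 reads its learning-rate schedule and backbone settings from a cfgs module that is not included in the listing. A hypothetical stub covering only the attributes referenced above (placeholder values) would be:

class cfgs:
  """Hypothetical stand-in for the cfgs module used in Example #6."""
  NET_NAME = 'resnet_v1_50'        # backbone name; also returned by model_name
  LR = 1e-3                        # initial learning rate
  DECAY_STEP = [60000, 80000]      # global-step boundaries for the LR schedule
  RESTORE_FROM_RPN = False         # warm_start(): restore RPN-only weights or the full model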
Example #7
class ModelHelper(AbstractModelHelper):
    """Model helper for creating a VGG model for the VOC dataset."""
    def __init__(self):
        """Constructor function."""

        # class-independent initialization
        super(ModelHelper, self).__init__()

        # initialize training & evaluation subsets
        self.dataset_train = PascalVocDataset(is_train=True)
        self.dataset_eval = PascalVocDataset(is_train=False)

        # setup hyper-parameters & anchor information
        self.anchor_info = None  # track the most recently-used one
        self.batch_size = None  # track the most recently-used one
        self.model_scope = None

    def build_dataset_train(self, enbl_trn_val_split=False):
        """Build the data subset for training, usually with data augmentation."""

        return self.dataset_train.build()

    def build_dataset_eval(self):
        """Build the data subset for evaluation, usually without data augmentation."""

        return self.dataset_eval.build()

    def forward_train(self, inputs, data_format='channels_last'):
        """Forward computation at training."""

        anchor_info = setup_anchor_info()
        outputs, self.model_scope = forward_fn(inputs, True, data_format,
                                               anchor_info)
        self.anchor_info = anchor_info
        self.batch_size = tf.shape(inputs['image'])[0]
        self.trainable_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.model_scope)

        return outputs

    def forward_eval(self, inputs, data_format='channels_last'):
        """Forward computation at evaluation."""

        anchor_info = setup_anchor_info()
        outputs, __ = forward_fn(inputs, False, data_format, anchor_info)
        self.anchor_info = anchor_info
        self.batch_size = tf.shape(inputs['image'])[0]

        return outputs

    def calc_loss(self, objects, outputs, trainable_vars):
        """Calculate loss (and some extra evaluation metrics)."""

        return calc_loss_fn(objects, outputs, trainable_vars, self.anchor_info,
                            self.batch_size)

    def setup_lrn_rate(self, global_step):
        """Setup the learning rate (and number of training iterations)."""

        bnds = [int(x) for x in parse_comma_list(FLAGS.lrn_rate_dcy_bnds)]
        vals = [
            FLAGS.lrn_rate_init * x
            for x in parse_comma_list(FLAGS.lrn_rate_dcy_rates)
        ]
        lrn_rate = tf.train.piecewise_constant(global_step, bnds, vals)
        lrn_rate = tf.maximum(
            lrn_rate, tf.constant(FLAGS.lrn_rate_min, dtype=lrn_rate.dtype))
        nb_iters = FLAGS.nb_iters_train

        return lrn_rate, nb_iters

    def warm_start(self, sess):
        """Initialize the model for warm-start.

        Description:
        * We use a pre-trained ImageNet classification model to initialize the backbone part of the SSD
          model for feature extraction. If the SSD model's checkpoint files already exist, then the
          learner should restore model weights by itself.
        """
        vars_to_restore = {
            var.op.name: var
            for var in sess.graph.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES)
        }
        vars_to_restore = {
            key.replace(FLAGS.model_scope + '/', ''): val
            for key, val in vars_to_restore.items()
        }
        backbone_ckpt_path = tf.train.latest_checkpoint(
            FLAGS.backbone_ckpt_dir)
        reader = tf.train.NewCheckpointReader(backbone_ckpt_path)
        ckpt_varlist = reader.get_variable_to_shape_map().keys()
        new_vars_map = {}
        for name, var in vars_to_restore.items():
            if name in ckpt_varlist:
                new_vars_map[name] = var
            else:
                tf.logging.info('missing variable named {}'.format(name))
        assert len(new_vars_map) > 0, 'no variables to restore'
        saver = tf.train.Saver(new_vars_map)
        saver.restore(sess, backbone_ckpt_path)

    def dump_n_eval(self, outputs, action):
        """Dump the model's outputs to files and evaluate."""

        if not is_primary_worker('global'):
            return

        if action == 'init':
            if os.path.exists(FLAGS.outputs_dump_dir):
                shutil.rmtree(FLAGS.outputs_dump_dir)
            os.mkdir(FLAGS.outputs_dump_dir)
        elif action == 'dump':
            filename = outputs['predictions']['filename'][0].decode(
                'utf8')[:-4]
            shape = outputs['predictions']['shape'][0]
            for idx_cls in range(1, FLAGS.nb_classes):
                with open(
                        os.path.join(FLAGS.outputs_dump_dir,
                                     'results_%d.txt' % idx_cls),
                        'a') as o_file:
                    scores = outputs['predictions']['scores_%d' % idx_cls][0]
                    bboxes = outputs['predictions']['bboxes_%d' % idx_cls][0]
                    bboxes[:, 0] = (bboxes[:, 0] * shape[0]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 1] = (bboxes[:, 1] * shape[1]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 2] = (bboxes[:, 2] * shape[0]).astype(
                        np.int32, copy=False) + 1
                    bboxes[:, 3] = (bboxes[:, 3] * shape[1]).astype(
                        np.int32, copy=False) + 1
                    for idx_bbox in range(bboxes.shape[0]):
                        bbox = bboxes[idx_bbox][:]
                        if bbox[2] > bbox[0] and bbox[3] > bbox[1]:
                            o_file.write('%s %.3f %.1f %.1f %.1f %.1f\n' %
                                         (filename, scores[idx_bbox], bbox[1],
                                          bbox[0], bbox[3], bbox[2]))
        elif action == 'eval':
            do_python_eval(os.path.join(FLAGS.data_dir_local, 'VOC2007'),
                           FLAGS.outputs_dump_dir)
        else:
            raise ValueError('unrecognized action in dump_n_eval(): ' + action)

    @property
    def model_name(self):
        """Model's name."""

        return 'peleenet_ssd'

    @property
    def dataset_name(self):
        """Dataset's name."""

        return 'pascalvoc'
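
Examples #4, #6 and #7 all write their detections with dump_n_eval() into one results_<class index>.txt file per class, using the standard VOC detection format: image id, confidence, then xmin, ymin, xmax, ymax in pixels, one detection per line. An illustrative line (the values are made up) would be:

# results_12.txt -- <image id> <confidence> <xmin> <ymin> <xmax> <ymax>
000001 0.912 48.0 240.0 195.0 371.0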