def __init__(self, input_data, trainable):

        self.trainable = trainable
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_class = len(self.classes)
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS)
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH
        self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD
        self.layer_nums = cfg.YOLO.LAYER_NUMS

        try:
            self.conv_lbbox, self.conv_mbbox, self.out = self.__build_nework(
                input_data)
        except Exception as err:
            raise NotImplementedError("Cannot build the YOLOv3 network!") from err
        print(self.out)
        self.predict_op = tf.argmax(input=self.out,
                                    axis=1,
                                    name='layer_classes')
        print("layer predict_op:", self.predict_op)

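        # decode() converts each raw detection head into box predictions
        # (x, y, w, h, objectness, class probabilities) using the anchors and
        # stride of that scale. Note the scope name 'pred_sbbox' does not match
        # the large-scale head it decodes; the mismatch is preserved as in the
        # original code since downstream code may look the tensor up by this name.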
        with tf.variable_scope('pred_sbbox'):
            self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[0],
                                          self.strides[0])

        with tf.variable_scope('pred_mbbox'):
            self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1],
                                          self.strides[1])
Example #2
    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL

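        # The placeholders are declared without a fixed shape so the test graph
        # can accept input images of any resolution.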
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
        #
        # with tf.name_scope('ema'):
        #     ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        # self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weight_file)
Example #3
    def __init__(self, dataset_type):
        self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
        self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
        self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
        self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG

        self.train_input_size = cfg.TRAIN.INPUT_SIZE
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.max_bbox_per_scale = 150

        self.annotations = self.load_annotations(dataset_type)
        self.num_samples = len(self.annotations)
        self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
        self.batch_count = 0
    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL
        self.pb_file = "E:/ckpt_dirs/Food_detection/multi_food/20190910/yolo_model.pb"

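        # Load the frozen inference graph (.pb), import it into a fresh Graph,
        # and fetch the input/output tensors by the names they were given at
        # export time.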
        graph = tf.Graph()
        with graph.as_default():
            output_graph_def = tf.GraphDef()
            with open(self.pb_file, "rb") as f:
                output_graph_def.ParseFromString(f.read())
                _ = tf.import_graph_def(output_graph_def, name="")

            self.sess = tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True))

            self.input = self.sess.graph.get_tensor_by_name(
                "define_input/input_data:0")
            self.trainable = self.sess.graph.get_tensor_by_name(
                "define_input/training:0")

            self.pred_sbbox = self.sess.graph.get_tensor_by_name(
                "define_loss/pred_sbbox/concat_2:0")
            self.pred_mbbox = self.sess.graph.get_tensor_by_name(
                "define_loss/pred_mbbox/concat_2:0")
            self.pred_lbbox = self.sess.graph.get_tensor_by_name(
                "define_loss/pred_lbbox/concat_2:0")

            self.layer_num = graph.get_tensor_by_name(
                "define_loss/layer_classes:0")
    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = "E:/ckpt_dirs/Food_detection/multi_food3/checkpoint/yolov3_train_loss=11.4018.ckpt-98"
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL

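        # Rebuild the training graph from the checkpoint's .meta file, restore
        # the weights, and look up the tensors needed for inference by name.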
        graph = tf.Graph()
        with graph.as_default():
            self.saver = tf.train.import_meta_graph("{}.meta".format(
                self.weight_file))
            self.sess = tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True))
            self.saver.restore(self.sess, self.weight_file)

            self.input = graph.get_tensor_by_name("define_input/input_data:0")
            self.trainable = graph.get_tensor_by_name(
                "define_input/training:0")

            self.pred_sbbox = graph.get_tensor_by_name(
                "define_loss/pred_sbbox/concat_2:0")
            self.pred_mbbox = graph.get_tensor_by_name(
                "define_loss/pred_mbbox/concat_2:0")
            self.pred_lbbox = graph.get_tensor_by_name(
                "define_loss/pred_lbbox/concat_2:0")

            self.layer_num = graph.get_tensor_by_name(
                "define_loss/layer_classes:0")
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.train_input_size = cfg.TRAIN.INPUT_SIZE
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             shape=(None, self.train_input_size, self.train_input_size, 3),
                                             name='input_data')
            self.layer_label = tf.placeholder(dtype=tf.float32, name='layer_label')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, shape=[], name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.layer_loss = self.model.layer_loss(self.layer_label)
            print("layer_loss:", self.layer_loss)
            self.l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
            self.layer_loss = tf.cond(self.layer_loss > 0.01, lambda: self.layer_loss, lambda: 0.0)
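            # Total objective: GIoU box loss + objectness loss + 2x class-prob
            # loss + 10x oven-rack layer loss + 1e-5 L2 weight decay.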
            self.loss = self.giou_loss + self.conf_loss + 2 * self.prob_loss + 10 * self.layer_loss + 1e-5 * self.l2_loss
        self.layer_out = self.model.out

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            # self.learn_rate = tf.train.exponential_decay(self.learn_rate_init, global_step=self.global_step,
            #                                              decay_steps=1000, decay_rate=0.9)
            # self.learn_rate = tf.maximum(self.learn_rate, self.learn_rate_end)
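            # Schedule: linear warm-up to learn_rate_init over warmup_steps,
            # then cosine annealing down to learn_rate_end:
            #   lr = lr_end + 0.5 * (lr_init - lr_end) * (1 + cos(pi * progress))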
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                      dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

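        # Stage one trains only the detection heads (conv_sbbox / conv_mbbox /
        # conv_lbbox) with the backbone frozen; stage two (below) trains every
        # variable.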
        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                     var_list=self.first_stage_trainable_var_list)
            # first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss)
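            # Chain control dependencies so that running the returned no-op also
            # runs the batch-norm UPDATE_OPS, the optimizer step, the global-step
            # increment, and the EMA weight update.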
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                      var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("layer_loss", self.layer_loss)
            tf.summary.scalar("total_loss", self.loss)

            test_logdir = "./data/log/test"
            if os.path.exists(test_logdir): shutil.rmtree(test_logdir)
            os.mkdir(test_logdir)
            if os.path.exists(self.train_logdir): shutil.rmtree(self.train_logdir)
            os.mkdir(self.train_logdir)

            self.write_op = tf.summary.merge_all()
            self.train_summary_writer = tf.summary.FileWriter(self.train_logdir, graph=self.sess.graph)
            self.test_summary_writer = tf.summary.FileWriter(test_logdir, graph=self.sess.graph)
        best_bboxes = self.get_top_cls(pred_bbox, org_h, org_w, self.top_n)  # get the top_n classes and their confidences
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        bboxes = utils.nms(bboxes, self.iou_threshold)
        layer_n = layer_[0]  # oven-rack (layer) classification result

        return bboxes, layer_n, best_bboxes


if __name__ == '__main__':
    img_root = "F:/serve_data/202101-04/covert_jpg"  # image directory
    img_save = "F:/serve_data/202101-04/classes"  # image save directory

    import time

    classes = utils.read_class_names(cfg.YOLO.CLASSES)
    classes[40] = "potatom"
    classes[41] = "sweetpotatom"
    classes[101] = "chiffon_size4"

    start_time = time.time()
    Y = YoloPredic()
    end_time0 = time.time()
    print("Load time:", end_time0 - start_time)
    for img in tqdm(os.listdir(img_root)):
        img_path = img_root + "/" + img
        image = cv2.imread(img_path)  # read the image
        bboxes, layer_n, best_bboxes = Y.predict(image)
        bboxes, layer_n, best_bboxes = correct_bboxes(bboxes, layer_n, best_bboxes)  # correct the output results

        if len(bboxes) > 0:
Example #8
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.train_input_size = cfg.TRAIN.INPUT_SIZE
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.tower_grads = []

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             shape=(None,
                                                    self.train_input_size,
                                                    self.train_input_size, 3),
                                             name='input_data')
            self.layer_label = tf.placeholder(dtype=tf.float32,
                                              name='layer_label')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            self.learn_rate = tf.train.exponential_decay(
                self.learn_rate_init,
                global_step=self.global_step,
                decay_steps=1000,
                decay_rate=0.9)
            self.learn_rate = tf.maximum(self.learn_rate, self.learn_rate_end)
            global_step_update = tf.assign_add(self.global_step, 1.0)

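        # Create one optimizer that is shared by all GPU towers below.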
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            self.opt = tf.train.GradientDescentOptimizer(self.learn_rate)

        with tf.name_scope("define_loss"), \
                tf.variable_scope("define_loss", reuse=tf.AUTO_REUSE):
            for i in range(4):
                with tf.device('/gpu:{}'.format(i + 4)):
                    self.input_data_i = self.input_data[i *
                                                        cfg.TRAIN.BATCH_SIZE:
                                                        (i + 1) *
                                                        cfg.TRAIN.BATCH_SIZE]
                    self.layer_label_i = self.layer_label[i *
                                                          cfg.TRAIN.BATCH_SIZE:
                                                          (i + 1) *
                                                          cfg.TRAIN.BATCH_SIZE]
                    self.label_sbbox_i = self.label_sbbox[i *
                                                          cfg.TRAIN.BATCH_SIZE:
                                                          (i + 1) *
                                                          cfg.TRAIN.BATCH_SIZE]
                    self.label_mbbox_i = self.label_mbbox[i *
                                                          cfg.TRAIN.BATCH_SIZE:
                                                          (i + 1) *
                                                          cfg.TRAIN.BATCH_SIZE]
                    self.label_lbbox_i = self.label_lbbox[i *
                                                          cfg.TRAIN.BATCH_SIZE:
                                                          (i + 1) *
                                                          cfg.TRAIN.BATCH_SIZE]
                    self.true_sbboxes_i = self.true_sbboxes[
                        i * cfg.TRAIN.BATCH_SIZE:(i + 1) *
                        cfg.TRAIN.BATCH_SIZE]
                    self.true_mbboxes_i = self.true_mbboxes[
                        i * cfg.TRAIN.BATCH_SIZE:(i + 1) *
                        cfg.TRAIN.BATCH_SIZE]
                    self.true_lbboxes_i = self.true_lbboxes[
                        i * cfg.TRAIN.BATCH_SIZE:(i + 1) *
                        cfg.TRAIN.BATCH_SIZE]

                    self.model = YOLOV3(self.input_data_i, self.trainable)
                    self.layer_out = self.model.out
                    self.net_var = tf.global_variables()
                    self.giou_loss, self.conf_loss, self.prob_loss, self.giou, self.bbox_loss_scale = self.model.compute_loss(
                        self.label_sbbox_i, self.label_mbbox_i,
                        self.label_lbbox_i, self.true_sbboxes_i,
                        self.true_mbboxes_i, self.true_lbboxes_i)

                    # self.layer_loss = self.model.layer_loss(self.layer_label_i)
                    # self.layer_loss = tf.cond(self.layer_loss > 0.01, lambda: self.layer_loss, lambda: 0.0)
                    self.layer_loss = 0
                    self.loss = self.giou_loss + self.conf_loss + 2 * self.prob_loss + 10 * self.layer_loss

                    tf.get_variable_scope().reuse_variables()
                    grads = self.opt.compute_gradients(self.loss)
                    self.tower_grads.append(grads)
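        # Average the per-tower gradients and apply a single update (see the
        # average_gradients sketch at the end of this example).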
        grads = average_gradients(self.tower_grads)
        self.train_op = self.opt.apply_gradients(grads)

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("layer_loss", self.layer_loss)
            tf.summary.scalar("total_loss", self.loss)

            test_logdir = "./data/log/test"
            if os.path.exists(test_logdir): shutil.rmtree(test_logdir)
            os.mkdir(test_logdir)
            if os.path.exists(self.train_logdir):
                shutil.rmtree(self.train_logdir)
            os.mkdir(self.train_logdir)

            self.write_op = tf.summary.merge_all()
            self.train_summary_writer = tf.summary.FileWriter(
                self.train_logdir, graph=self.sess.graph)
            self.test_summary_writer = tf.summary.FileWriter(
                test_logdir, graph=self.sess.graph)

        # FLOPs and parameter count
        flops = tf.profiler.profile(
            self.sess.graph,
            options=tf.profiler.ProfileOptionBuilder.float_operation())
        params = tf.profiler.profile(self.sess.graph,
                                     options=tf.profiler.ProfileOptionBuilder.
                                     trainable_variables_parameter())
        print('FLOPs: {};    Trainable params: {}'.format(
            flops.total_float_ops, params.total_parameters))
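

# `average_gradients` is called above but not defined in this snippet. A minimal
# sketch, assuming the standard TensorFlow multi-GPU pattern of averaging each
# variable's gradient across all towers:
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars: ((grad_gpu0, var), (grad_gpu1, var), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars if g is not None]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # Every tower references the same variable, so take it from tower 0.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads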