Example #1
def test1():
    # with tf.device("/gpu:0"):
    # [batch_size, #anchors]
    # loc_preds = tf.random_uniform([3, 10, 4])
    # loc_trues = tf.random_uniform([3, 10, 4])
    # cls_preds = tf.random_uniform([3, 10, 12])
    # cls_trues = tf.random_uniform([3, 10])

    from inputs_multi import dataset_generator
    from retinanet2.retinanet import RetinaNet
    from encoder import BoxEncoder

    image_size = (448, 672)

    dataset = dataset_generator('val', image_size, 1, 1, 100)
    model = RetinaNet()
    anchor_boxes = BoxEncoder().get_anchor_boxes(image_size)

    with tf.device("/gpu:0"):
        for i, (image, loc_trues,
                cls_trues) in enumerate(tfe.Iterator(dataset)):
            loc_preds, cls_preds = model(image, is_training=True)
            loc_loss, cls_loss, ious_loss = loss_fn(loc_preds,
                                                    loc_trues,
                                                    cls_preds,
                                                    cls_trues,
                                                    anchor_boxes,
                                                    num_classes=1)
            # loc_loss, cls_loss = loss_fn(loc_preds, loc_trues, cls_preds, cls_trues, num_classes=1)
            print(
                "Step {}: Location loss: {:.5f}  |  Class loss: {:.5f}".format(
                    i, loc_loss.numpy(), cls_loss.numpy()))
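This example assumes eager execution is already enabled and that loss_fn is importable; a minimal preamble sketch (the `loss` module name is a guess, not from the source):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()   # must run once, before any ops are created
#from loss import loss_fn      # hypothetical location of the repo's loss_fn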
Example #2
    def __init__(self,
                 n_channel=3,
                 num_class=conf.num_class,
                 image_size=conf.input_size):
        self.n_channel = n_channel
        self.num_class = num_class
        self.image_size = image_size

        #channels_first
        #self.images = tf.placeholder(dtype=tf.float32, shape=[None, self.n_channel, self.image_size, self.image_size],name='images')
        self.images = tf.placeholder(dtype=tf.float32,
                                     shape=[
                                         None, self.image_size[0],
                                         self.image_size[1], self.n_channel
                                     ],
                                     name='images')
        self.is_training = tf.placeholder(dtype=tf.bool, name='traing_mode')  # (sic) name looked up via get_tensor_by_name in the inference examples
        self.global_step = tf.Variable(0, dtype=tf.int64, name='global_step')

        model = RetinaNet()
        #self.loc_preds, self.cls_preds = model(self.images, training=self.is_training)     #retinanet1
        self.loc_preds, self.cls_preds = model(
            self.images, is_training=self.is_training)  #retinanet2
        self.d_bboxes, self.d_cls_pred, self.d_score = BoxEncoder().decode(
            self.loc_preds[0],
            self.cls_preds[0],
            self.image_size,
            tf_box_order=conf.tf_box_order)
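A hedged usage sketch for this graph-mode wrapper; Model is a stand-in for the class this __init__ belongs to (the real class name is not shown in the excerpt):

import numpy as np
import tensorflow as tf

net = Model()  # hypothetical class name
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((1, net.image_size[0], net.image_size[1], net.n_channel),
                     dtype=np.float32)
    bboxes, cls_pred, score = sess.run(
        [net.d_bboxes, net.d_cls_pred, net.d_score],
        feed_dict={net.images: dummy, net.is_training: False})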
Example #3
def compute_mAP(generator, batch, input_size, all_detections, loc_preds,
                cls_preds):
    output_size = (1080, 1920)
    for i, (loc_pred, cls_pred) in enumerate(zip(loc_preds, cls_preds)):
        bboxes, cls_pred, score = BoxEncoder().decode(
            loc_pred, cls_pred, input_size, tf_box_order=conf.tf_box_order)
        if conf.tf_box_order:
            # [y_min, x_min, y_max, x_max] -> [x_min, y_min, x_max, y_max]
            bboxes[:, [0, 1, 2, 3]] = bboxes[:, [1, 0, 3, 2]]
        # (w, h) order so the scales below match the xyxy box layout;
        # defined outside the if so they exist for either box order
        input_size1 = (input_size[1], input_size[0])
        output_size1 = (output_size[1], output_size[0])

        input_scale = list(input_size1) * 2
        output_scale = list(output_size1) * 2
        # rescale boxes from network-input resolution to output resolution
        bboxes = bboxes / input_scale * output_scale
        bboxes = np.clip(bboxes, 0, output_scale).astype(int)

        image_detections = np.concatenate(
            (bboxes,
             np.expand_dims(score, axis=1),
             np.expand_dims(cls_pred, axis=1)), axis=1)
        for label in range(generator.num_classes()):
            image_id = i + batch * conf.batch_size
            all_detections[image_id][label] = image_detections[
                image_detections[:, -1] == label, :-1]
        #sys.stdout.write('process: [{}/{}]  used_time: {:.2f}ms\r'.format(i + 1, num_images, used_time))
        #sys.stdout.flush()
    return all_detections
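A sketch of how compute_mAP might be driven over a validation set, reusing the eager-mode model and dataset from Example #1 (the driving loop itself is not in the source):

num_images = generator.size()
all_detections = [[None for _ in range(generator.num_classes())]
                  for _ in range(num_images)]
for batch, (images, _, _) in enumerate(tfe.Iterator(dataset)):
    loc_preds, cls_preds = model(images, is_training=False)
    all_detections = compute_mAP(generator, batch, conf.input_size,
                                 all_detections, loc_preds, cls_preds)
average_precisions = evaluate(generator, all_detections, iou_threshold=0.5)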
Example #4
    def mAP_evaluation1(self):
        """Evaluate the model on `dataset` without running `optimizer`."""
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

        total_time = 0.
        total_losses = 0.
        num_images = generator.size()

        all_annotations, img_size = get_annotations(generator)

        all_detections = [[None for i in range(generator.num_classes())]
                          for j in range(num_images)]
        img_path_list = generator.get_imgpath_list()
        for ind, img_path in enumerate(img_path_list):
            image_4D, output_size = preprocess(img_path, self.image_size)
            boxes = all_annotations[ind][0].tolist()
            labels = tf.ones(len(boxes), tf.int32)
            # normalize boxes by the original image size
            boxes /= np.array(img_size[ind] * 2)
            #boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
            #boxes = tf.cast(boxes, dtype=tf.float32)
            boxes = boxes.astype(np.float32)
            loc_trues, cls_trues = BoxEncoder().encode(boxes, labels, conf.input_size)


            start = time.time()
            [loc_loss, cls_loss, total_loss, iou_loss, d_bboxes, d_cls_pred,
             d_score] = self.sess.run(
                 fetches=[self.loc_loss, self.cls_loss, self.total_loss,
                          self.iou_loss, self.d_bboxes, self.d_cls_pred,
                          self.d_score],
                 feed_dict={
                     self.images: image_4D,
                     self.loc_trues: np.array(loc_trues).reshape([1, -1, 4]),
                     self.cls_trues: np.array(cls_trues).reshape([1, -1]),
                     self.is_training: False
                 })

            used_time = time.time() - start
            all_detections = self.compute_mAP(generator, ind, self.image_size,
                                              output_size, all_detections,
                                              d_bboxes, d_cls_pred, d_score)

            total_losses += total_loss
            if (ind + 1) % 100 == 0:
                # epoch / conf.num_epochs come from the enclosing training context
                print("[EVALUATION] Batch: [{}/{:.0f}] ({:.0f}/{})\t".format(ind, num_images, epoch, conf.num_epochs),
                      "loc_loss: {:.6f} | cls_loss: {:.6f} | iou_loss: {:.6f} | total_loss: {:.3f} | used_time: {:.2f}".format(
                          loc_loss, cls_loss, iou_loss, total_loss, used_time))


            #sys.stdout.write('process: [{}/{}]  used_time: {:.2f}ms\r'.format(ind + 1, num_images, used_time*1000))
            #sys.stdout.flush()
        print("Total average loss: {:.4f}".format(total_losses / num_images))
        print('\n\n >>>> Computing mAP, please wait... >>>>\n')
        average_precisions = evaluate(generator, all_detections, iou_threshold=0.5)
        mAP = self.print_evaluation(average_precisions)
        return mAP, total_losses/num_images
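The tensor shapes around BoxEncoder().encode can be read off the reshapes above; the following summary is inferred from this example, not from the encoder's source:

# Inferred interface of BoxEncoder().encode as used here:
#   boxes:  float32 [num_gt, 4], normalized by the original image size
#   labels: int32   [num_gt]
# The per-anchor targets are then fed as batches of one:
#   loc_trues -> reshaped to [1, -1, 4]  (matches the loc_trues placeholder)
#   cls_trues -> reshaped to [1, -1]     (matches the cls_trues placeholder)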
Example #5
        # dispatches via eval to batch_cls / batch_scores (defined above this excerpt)
        for item in ['cls', 'scores']:
            eval('batch_' + item).append(
                tf.gather(eval(item), max_score_id).numpy())
    return [
        tf.convert_to_tensor(item, dtype=tf.float32)
        for item in [batch_loc, batch_cls, batch_scores]
    ]


assert tfe.num_gpus() > 0, 'Make sure the GPU device exists'
device_name = '/gpu:{}'.format(args.cuda_device)
print('\n==> ==> ==> Using device {}'.format(device_name))

dataset, dataset_size = deploy_dataset_generator()
model = RetinaNet()
box_encoder = BoxEncoder()


def save_deployment():
    with tfe.restore_variables_on_create(
            tf.train.latest_checkpoint(conf.checkpoint_dir)):
        # epoch = tfe.Variable(1., name='epoch')
        # print('==> ==> ==> Restore from epoch {}...\n'.format(epoch.numpy()))
        gs = tf.train.get_or_create_global_step()
        print('==> ==> ==> Restore from global step {}...\n'.format(
            gs.numpy()))

        deploy_results = []
        # batch images
        for im_batch, p_batch in tqdm(tfe.Iterator(dataset),
                                      total=dataset_size // conf.batch_size,
Example #6
def inference(generator, model_ckpt_path):
    # gpu_options = tf.GPUOptions(allow_growth=True)
    # config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.25

    graph = tf.Graph()
    sess = tf.Session(graph=graph, config=config)
    with graph.as_default():
        #saver=tf.train.import_meta_graph('./checkpoints/test1/model_31350.ckpt.meta')
        #saver.restore(sess,'./checkpoints/test1/model_31350.ckpt')        #2cls_test_fl_alt_e3m

        saver = tf.train.import_meta_graph(model_ckpt_path + '.meta')
        saver.restore(sess, model_ckpt_path)

        input = graph.get_tensor_by_name('images:0')
        is_training = graph.get_tensor_by_name('traing_mode:0')
        loc_preds = tf.get_collection('output_tensor')[0]
        cls_preds = tf.get_collection('output_tensor')[1]

        input_size = conf.input_size
        box_encoder = BoxEncoder()
        _bboxes, _cls_pred, _score = box_encoder.decode(
            loc_preds[0], cls_preds[0], input_size)

        label_2_name_map = label_to_name_map()
        print("\n>>>>>>>>>>>>>>Test<<<<<<<<<<<<<<<<<<<\n")

        num_images = generator.size()
        all_detections = [[None for i in range(generator.num_classes())]
                          for j in range(num_images)]
        img_path_list = generator.get_imgpath_list()
        for i, img_path in enumerate(img_path_list):
            image_4D, output_size = preprocess(img_path, input_size)

            start_t = time.time()
            [bboxes, cls_pred,
             score] = sess.run(fetches=[_bboxes, _cls_pred, _score],
                               feed_dict={
                                   input: image_4D,
                                   is_training: False
                               })
            used_time = (time.time() - start_t) * 1000
            #print("used time: [{:.2f}]".format((time.time()-start_t) * 1000))
            #draw_bbox(result_dir, label_2_name_map, img_path, i, bboxes, cls_pred, score, input_size, output_size)

            # if conf.tf_box_order:
            #     bboxes[:, [0, 1, 2, 3]] = bboxes[:, [1, 0, 3, 2]]
            input_size1 = (input_size[1], input_size[0])
            output_size1 = (output_size[1], output_size[0])

            input_scale = list(input_size1) * 2
            output_scale = list(output_size1) * 2
            bboxes = bboxes / input_scale * output_scale
            bboxes = np.clip(bboxes[:, :], 0, output_scale).astype(int)

            image_detections = np.concatenate(
                (bboxes, np.expand_dims(
                    score, axis=1), np.expand_dims(cls_pred, axis=1)),
                axis=1)
            for label in range(generator.num_classes()):
                all_detections[i][label] = image_detections[
                    image_detections[:, -1] == label, :-1]
            sys.stdout.write('process: [{}/{}]  used_time: {:.2f}ms\r'.format(
                i + 1, num_images, used_time))
            sys.stdout.flush()
        print('\n\n >>>> Computing mAP, please wait... >>>>\n')
        average_precisions = evaluate(generator,
                                      all_detections,
                                      iou_threshold=0.5)
        print_evaluation(average_precisions)

    sess.close()
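The `bboxes / input_scale * output_scale` step maps boxes from network-input coordinates back to original-frame coordinates; a small numeric check with illustrative sizes:

import numpy as np

input_scale = [384, 224] * 2     # (w, h, w, h) of a 224x384 network input
output_scale = [1920, 1080] * 2  # (w, h, w, h) of a 1080x1920 original frame
box = np.array([96.0, 56.0, 300.0, 200.0])  # xyxy in input coordinates
print(box / input_scale * output_scale)     # ~[480. 270. 1500. 964.29]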
Example #7
def _encode_boxes(image, bboxes, labels):
    loc_target, cls_target = BoxEncoder().encode(bboxes, labels, input_size)
    return image, loc_target, cls_target
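_encode_boxes has the signature of a tf.data map function; a hedged sketch of how it would typically be wired into the input pipeline (the pipeline itself is not shown in the source):

# assumes `dataset` yields (image, bboxes, labels) tuples at this point
dataset = dataset.map(_encode_boxes, num_parallel_calls=4)
dataset = dataset.batch(conf.batch_size)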
Example #8
def test(mode='test'):

    from inputs_multi import parse_anno_xml
    from utils.preprocess import preprocess_for_train
    from retinanet2.retinanet import RetinaNet
    from encoder import BoxEncoder
    import numpy as np

    image_size = (224, 384)
    model = RetinaNet('ShuffleNetV2')
    anchor_boxes = BoxEncoder().get_anchor_boxes(image_size)
    print(anchor_boxes[:30])
    #
    # result_dir = r'../inference/input_test2'
    # if not os.path.exists(result_dir):
    #     os.mkdir(result_dir)

    # impath = r'/workspace/tensorflow/object_det/data/body_detection_data/mirror/nantong/nantong_images/3652615053362176_0.jpg'
    # xml_path = r'/workspace/tensorflow/object_det/data/body_detection_data/mirror/nantong/nantong_annotations_xml/3652615053362176_0.xml'
    impath = r'/workspace/tensorflow/object_det/data/body_detection_data/mirror/spring/v0_JPEGImages/164_1040.jpg'
    xml_path = r'/workspace/tensorflow/object_det/data/body_detection_data/mirror/spring/v0_Annotations_xml/164_1040.xml'

    bboxes, labels = parse_anno_xml(xml_path)
    #print(type(bboxes))
    box = bboxes.copy()
    box *= np.array([image_size[1], image_size[0]] * 2)
    print(box[:, 2:] - box[:, :2])
    #img_size =tf.convert_to_tensor(img_size, name='image_size')
    im_raw = tf.read_file(impath)
    image = tf.image.decode_jpeg(im_raw, channels=3)

    print("#########################")
    bboxes, labels = tf.convert_to_tensor(bboxes), tf.convert_to_tensor(labels)
    image, bboxes, labels = preprocess_for_train(image,
                                                 bboxes,
                                                 labels,
                                                 out_shape=image_size)

    loc_trues, cls_trues = BoxEncoder().encode(bboxes,
                                               labels,
                                               image_size,
                                               pos_iou_threshold=0.5,
                                               neg_iou_threshold=0.33)
    if conf.use_secondbig_loss_constrain:
        #loc_trues = tf.stack([loc_trues, loc_trues], axis=0)
        loc_trues = tf.stack([loc_trues], axis=0)  # both branches are currently identical
    else:
        loc_trues = tf.stack([loc_trues], axis=0)
    cls_trues = tf.stack([cls_trues], axis=0)
    #print(loc_trues, cls_trues)
    image = tf.stack([image], axis=0)
    loc_preds, cls_preds = model(image, is_training=True)
    #print(loc_preds, cls_preds)
    #print(image)
    loc_loss, cls_loss, ious_loss = loss_fn(loc_preds,
                                            loc_trues,
                                            cls_preds,
                                            cls_trues,
                                            anchor_boxes,
                                            num_classes=1)
    #loss = iou_loss(loc_preds, loc_trues, anchor_boxes, ious_pred)
    print(loc_loss, cls_loss, ious_loss)
Example #9
def _encode_boxes(image, bboxes, labels):
    loc_target, cls_target = BoxEncoder().encode(
        bboxes, labels, input_size,
        pos_iou_threshold=conf.pos_iou_threshold,
        neg_iou_threshold=conf.neg_iou_threshold)
    return image, loc_target, cls_target
Example #10
    def __init__(self,
                 n_channel=3,
                 num_class=conf.num_class,
                 image_size=conf.input_size):
        self.n_channel = n_channel
        self.num_class = num_class
        self.image_size = image_size

        self.images = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.image_size[0], self.image_size[1], self.n_channel],
            name='images')
        num_bbox = 4
        if conf.use_secondbig_loss_constrain:
            num_bbox = 8
        self.loc_trues = tf.placeholder(dtype=tf.float32,
                                        shape=[None, None, num_bbox],
                                        name='loc_target')
        self.cls_trues = tf.placeholder(dtype=tf.float32,
                                        shape=[None, None],
                                        name='cls_target')
        self.is_training = tf.placeholder(dtype=tf.bool, name='traing_mode')  # (sic) kept to match get_tensor_by_name('traing_mode:0') in the inference examples

        self.global_step = tf.Variable(0, dtype=tf.int64, name='global_step')

        #self.logits = self.model(self.images, is_training=self.is_training)
        model = RetinaNet(conf.net)
        #self.loc_preds, self.cls_preds = model(self.images, training=self.is_training)     #retinanet1
        self.loc_preds, self.cls_preds = model(self.images, is_training=self.is_training)   #retinanet2

        self.anchor_boxes = BoxEncoder().get_anchor_boxes(self.image_size)
        self.d_bboxes, self.d_cls_pred, self.d_score = BoxEncoder().decode(
            self.loc_preds[0], self.cls_preds[0], self.image_size,
            tf_box_order=conf.tf_box_order)


        self.loc_loss, self.cls_loss, self.iou_loss = loss_fn(
            self.loc_preds, self.loc_trues, self.cls_preds, self.cls_trues,
            self.anchor_boxes, num_classes=self.num_class)

        #self.loc_loss, self.cls_loss = loss_fn(self.loc_preds, self.loc_trues, self.cls_preds, self.cls_trues, num_classes=self.num_class)

        self.regularization_loss = tf.losses.get_regularization_loss()

        tf.add_to_collection('losses', self.loc_loss)
        tf.add_to_collection('losses', self.cls_loss)
        tf.add_to_collection('losses', self.iou_loss)
        tf.add_to_collection('losses', self.regularization_loss)
        self.total_loss = tf.add_n(tf.get_collection('losses'))

        tf.summary.scalar('loc_loss', self.loc_loss)
        tf.summary.scalar('cls_loss', self.cls_loss)
        tf.summary.scalar('total_loss', self.total_loss)

        # lr = tf.cond(tf.less(self.global_step, 5000),
        #              lambda: tf.constant(0.0001),
        #              lambda: tf.cond(tf.less(self.global_step, 8000),
        #                              lambda: tf.constant(0.00005),
        #                              lambda: tf.cond(tf.less(self.global_step, 12000),
        #                                              lambda: tf.constant(0.000025),
        #                                              lambda: tf.constant(0.00001))))

        #lr = tf.train.exponential_decay(0.0001, self.global_step, 1000, 0.96, staircase=True)
        #self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)#.minimize(self.avg_loss, global_step=self.global_step)
        #self.optimizer=tf.train.GradientDescentOptimizer(learning_rate=lr)#.minimize(self.avg_loss, global_step=self.global_step)
        #self.optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9, use_nesterov=True)

        self.lr = tf.Variable(1e-5, trainable=False, dtype=tf.float32)
        self.learning_rate_decay_op = self.lr.assign(self.lr * 0.1)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)


        tf.summary.scalar('learning_rate', self.lr)


        # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # with tf.control_dependencies(update_ops):
        #     #self.train_op = self.optimizer.minimize(self.total_loss, global_step=self.global_step)
        #
        #     grads = self.optimizer.compute_gradients(self.total_loss)
        #     for i, (g, v) in enumerate(grads):
        #         if g is not None:
        #             grads[i] = (tf.clip_by_norm(g, 5), v)  # clip gradients
        #     self.train_op = self.optimizer.apply_gradients(grads, global_step=self.global_step)

        # only used by the commented fine-tuning train_op below
        vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[180:]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            if args.grad_clip_norm is not None:
                grads_and_vars = self.optimizer.compute_gradients(self.total_loss)
                grads = [g for g, _ in grads_and_vars]
                var_list = [v for _, v in grads_and_vars]
                grads, _ = tf.clip_by_global_norm(grads, args.grad_clip_norm)
                self.train_op = self.optimizer.apply_gradients(
                    zip(grads, var_list), global_step=self.global_step)
            else:
                self.train_op = self.optimizer.minimize(
                    self.total_loss, global_step=self.global_step)
                #self.train_op = self.optimizer.minimize(self.total_loss, var_list=vars1, global_step=self.global_step)

        tf.add_to_collection('input_tensor', self.images)
        tf.add_to_collection('input_tensor', self.loc_trues)
        tf.add_to_collection('input_tensor', self.cls_trues)
        tf.add_to_collection('input_tensor', self.is_training)
        tf.add_to_collection('output_tensor', self.loc_preds)
        tf.add_to_collection('output_tensor', self.cls_preds)
        tf.add_to_collection('decode_tensor', self.d_bboxes)
        tf.add_to_collection('decode_tensor', self.d_cls_pred)
        tf.add_to_collection('decode_tensor', self.d_score)
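A hedged training-step sketch against the graph built by this __init__; Model is a stand-in class name and the *_batch arrays are assumed to come from the repo's input pipeline:

net = Model()  # hypothetical class name
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, step, loss = sess.run(
        [net.train_op, net.global_step, net.total_loss],
        feed_dict={net.images: images_batch,  # assumed input-pipeline arrays
                   net.loc_trues: loc_batch,
                   net.cls_trues: cls_batch,
                   net.is_training: True})
    # drop the learning rate by 10x when validation loss plateaus:
    # sess.run(net.learning_rate_decay_op)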
Example #11
def evaluation():
    # gpu_options = tf.GPUOptions(allow_growth=True)
    # config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.25

    graph = tf.Graph()
    sess = tf.Session(graph=graph, config=config)
    with graph.as_default():

        model_ckpt_path = './checkpoints/retinanet2_mojing2/1cls_224x384_6ssd_a7_1b_nop7_p045_n04_alph09_Res50_rbw/model_12.ckpt'
        saver = tf.train.import_meta_graph(model_ckpt_path + '.meta')
        saver.restore(sess, model_ckpt_path)

        input = graph.get_tensor_by_name('images:0')
        is_training = graph.get_tensor_by_name('traing_mode:0')
        loc_preds = tf.get_collection('output_tensor')[0]
        cls_preds = tf.get_collection('output_tensor')[1]

        input_size = (224, 384)
        box_encoder = BoxEncoder()
        _bboxes, _cls_pred, _score = box_encoder.decode(
            loc_preds[0], cls_preds[0], input_size)
        label_2_name_map = label_to_name_map()
        print("\n>>>>>>>>>>>>>>Test<<<<<<<<<<<<<<<<<<<\n")

        for file_name in os.listdir(image_test_dir):

            #######################
            # # image_test_dir ='/workspace/tensorflow/object_det/data/body_detection_data/mirror/spring/v0_JPEGImages'
            # # spring_test = "/workspace/tensorflow/object_det/data/body_detection_data/mirror/spring/test.txt"
            # image_test_dir ='/workspace/tensorflow/object_det/data/det_img_test/image'
            # spring_test = "/workspace/tensorflow/object_det/data/det_img_test/test.txt"
            # with open(spring_test, 'r') as f:
            #     lines = f.readlines()
            #     imgname_list = []
            #     for i in range(0, len(lines)):
            #         imgname = lines[i].rstrip('\n').split(' ')[-1]
            #         imgname_list.append(imgname)
            #     f.close()
            # result_dir = './inference2/mix_12_1cls_224x384_6ssd_a7_1b_nop7_p045_n04_alph09_Res50_rbw_nms04_s07'
            # #np.random.shuffle(imgname_list)
            # for file_name in imgname_list:
            ##################

            img_path = os.path.join(image_test_dir, file_name)
            if not os.path.exists(img_path):
                print("image not found: {}".format(img_path))
                continue
            image = cv2.imread(img_path)
            im_h, im_w, _ = image.shape
            output_size = (im_h, im_w)

            image = cv2.resize(image, (input_size[1], input_size[0]))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
            #image = image - np.array([123.68, 116.78, 103.94])
            image = image.astype(np.float32) * (1. / 255)
            if conf.net == 'ShuffleNetV2':
                image = (2.0 * image) - 1.0  # rescale [0, 1] -> [-1, 1]
            if channels_first:
                image = image.transpose((2, 0, 1))
            image_4D = np.expand_dims(image, axis=0)

            start_t = time.time()
            [bboxes, cls_pred,
             score] = sess.run(fetches=[_bboxes, _cls_pred, _score],
                               feed_dict={
                                   input: image_4D,
                                   is_training: False
                               })
            print("used time: [{:.2f}] ms".format((time.time() - start_t) * 1000))
            print(" ~~~~~~~~~ inference over ~~~~~~~~~")

            draw_bbox(result_dir, label_2_name_map, img_path, file_name,
                      bboxes, cls_pred, score, input_size, output_size)
            print("draw over")
    sess.close()