Example 1
0
    def get_FPS(self, image, test_interval):
        """Return the average inference time in seconds per image.

        Runs the full pipeline (predict + optional NMS + decode) once as
        a warm-up, then times `test_interval` identical passes.

        Args:
            image: input PIL image.
            test_interval: number of timed pipeline passes.

        Returns:
            Average seconds per pass (float).
        """
        image_shape = np.array(np.shape(image)[0:2])
        #---------------------------------------------------------#
        #   Letterbox: resize without distortion, pad with gray bars.
        #---------------------------------------------------------#
        crop_img = letterbox_image(image, [self.input_shape[0], self.input_shape[1]])
        #----------------------------------------------------------------------------------#
        #   RGB -> BGR: the original centernet_hourglass weights were trained on BGR images.
        #----------------------------------------------------------------------------------#
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
        #-----------------------------------------------------------#
        #   Preprocess / normalize; resulting shape is [1, H, W, C].
        #-----------------------------------------------------------#
        photo = np.reshape(preprocess_image(photo), [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])

        def _run_pipeline():
            # One predict + post-process pass. The result is discarded:
            # only the elapsed time matters, but the post-processing is
            # kept so the timing reflects the real detection cost.
            preds = self.centernet.predict(photo)
            if self.nms:
                preds = np.array(nms(preds, self.nms_threhold))
            if len(preds[0]) > 0:
                # Feature-map (stride-4) coords -> fractions of the input size.
                preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)
                det_label = preds[0][:, -1]
                det_conf = preds[0][:, -2]
                det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[0][:, 1], preds[0][:, 2], preds[0][:, 3]
                # Keep only boxes whose score clears the confidence threshold.
                top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]
                top_conf = det_conf[top_indices]
                top_label_indices = det_label[top_indices].tolist()
                top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices], -1), np.expand_dims(det_ymin[top_indices], -1), np.expand_dims(det_xmax[top_indices], -1), np.expand_dims(det_ymax[top_indices], -1)
                # Map boxes back to the original image (remove the gray bars).
                centernet_correct_boxes(top_ymin, top_xmin, top_ymax, top_xmax,
                                        np.array([self.input_shape[0], self.input_shape[1]]),
                                        image_shape)

        # Warm-up pass, excluded from the timing below.
        _run_pipeline()

        t1 = time.time()
        for _ in range(test_interval):
            _run_pipeline()
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time
Example 2
0
    def detect_image(self, image_id, image):
        """Write detections for `image_id` to ./input/detection-results/<id>.txt.

        Used for mAP evaluation: the confidence threshold is lowered to
        0.01 so low-scoring boxes are also recorded. Each output line is
        "<class> <score> <left> <top> <right> <bottom>".

        Returns the untouched image when there are no detections
        (original behavior), otherwise None.
        """
        f = open("./input/detection-results/" + image_id + ".txt", "w")
        try:
            self.confidence = 0.01
            self.nms_threhold = 0.5

            image_shape = np.array(np.shape(image)[0:2])
            crop_img = letterbox_image(image,
                                       [self.input_shape[0], self.input_shape[1]])
            # RGB -> BGR: the original centernet_hourglass weights were
            # trained on BGR images.
            photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]

            # Preprocess / normalize; resulting shape is [1, H, W, C].
            photo = np.reshape(
                preprocess_image(photo),
                [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
            preds = self.centernet.predict(photo)

            if self.nms:
                preds = np.array(nms(preds, self.nms_threhold))

            if len(preds[0]) <= 0:
                return image

            # Feature-map (stride-4) coords -> fractions of the input size.
            preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)

            # Keep only boxes whose score clears the confidence threshold.
            det_label = preds[0][:, -1]
            det_conf = preds[0][:, -2]
            det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[
                0][:, 1], preds[0][:, 2], preds[0][:, 3]

            top_indices = [
                i for i, conf in enumerate(det_conf) if conf >= self.confidence
            ]
            top_conf = det_conf[top_indices]
            top_label_indices = det_label[top_indices].tolist()
            top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(
                det_xmin[top_indices],
                -1), np.expand_dims(det_ymin[top_indices], -1), np.expand_dims(
                    det_xmax[top_indices],
                    -1), np.expand_dims(det_ymax[top_indices], -1)

            # Map boxes back to the original image (remove the gray bars).
            boxes = centernet_correct_boxes(
                top_ymin, top_xmin, top_ymax, top_xmax,
                np.array([self.input_shape[0], self.input_shape[1]]), image_shape)

            for i, c in enumerate(top_label_indices):
                predicted_class = self.class_names[int(c)]
                score = str(top_conf[i])

                top, left, bottom, right = boxes[i]
                f.write("%s %s %s %s %s %s\n" %
                        (predicted_class, score[:6], str(int(left)), str(
                            int(top)), str(int(right)), str(int(bottom))))
        finally:
            # BUG FIX: the original leaked the file handle on the early
            # "no detections" return; close it on every exit path.
            f.close()
        return
Example 3
0
    def get_FPS(self, image, test_interval):
        """Return the average inference time in seconds per image.

        Runs the full pipeline (forward + decode + optional NMS) once as
        a warm-up, then times `test_interval` identical passes.

        Args:
            image: input PIL image.
            test_interval: number of timed pipeline passes.

        Returns:
            Average seconds per pass (float).
        """
        image_shape = np.array(np.shape(image)[0:2])
        #---------------------------------------------------------#
        #   Letterbox: resize without distortion, pad with gray bars.
        #---------------------------------------------------------#
        crop_img = letterbox_image(image,
                                   [self.image_size[0], self.image_size[1]])
        #----------------------------------------------------------------------------------#
        #   RGB -> BGR: the original centernet_hourglass weights were trained on BGR images.
        #----------------------------------------------------------------------------------#
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
        #-----------------------------------------------------------#
        #   Preprocess / normalize, HWC -> CHW; shape [1, C, H, W].
        #-----------------------------------------------------------#
        photo = np.reshape(
            np.transpose(preprocess_image(photo), (2, 0, 1)),
            [1, self.image_size[2], self.image_size[0], self.image_size[1]])

        # Build the input tensor once; it is reused for every pass.
        images = Variable(
            torch.from_numpy(np.asarray(photo)).type(torch.FloatTensor))
        if self.cuda:
            images = images.cuda()

        def _run_pipeline():
            # One forward + post-process pass. The result is discarded:
            # only the elapsed time matters, but the post-processing is
            # kept so the timing reflects the real detection cost.
            with torch.no_grad():
                outputs = self.centernet(images)
                if self.backbone == 'hourglass':
                    outputs = [
                        outputs[-1]["hm"].sigmoid(), outputs[-1]["wh"],
                        outputs[-1]["reg"]
                    ]
                outputs = decode_bbox(outputs[0], outputs[1], outputs[2],
                                      self.image_size, self.confidence,
                                      self.cuda)
                try:
                    if self.nms:
                        outputs = np.array(nms(outputs, self.nms_threhold))
                    output = outputs[0]
                    if len(output) > 0:
                        batch_boxes = output[:, :4]
                        det_conf, det_label = output[:, 4], output[:, 5]
                        det_xmin, det_ymin = batch_boxes[:, 0], batch_boxes[:, 1]
                        det_xmax, det_ymax = batch_boxes[:, 2], batch_boxes[:, 3]
                        # Keep only boxes above the confidence threshold.
                        top_indices = [
                            i for i, conf in enumerate(det_conf)
                            if conf >= self.confidence
                        ]
                        top_conf = det_conf[top_indices]
                        top_label_indices = det_label[top_indices].tolist()
                        top_xmin = np.expand_dims(det_xmin[top_indices], -1)
                        top_ymin = np.expand_dims(det_ymin[top_indices], -1)
                        top_xmax = np.expand_dims(det_xmax[top_indices], -1)
                        top_ymax = np.expand_dims(det_ymax[top_indices], -1)
                        # Map boxes back to the original image (remove gray bars).
                        centernet_correct_boxes(
                            top_ymin, top_xmin, top_ymax, top_xmax,
                            np.array([self.image_size[0], self.image_size[1]]),
                            image_shape)
                except Exception:
                    # BUG FIX: was a bare `except: pass`, which also
                    # swallowed KeyboardInterrupt/SystemExit. Post-process
                    # failures are still ignored on purpose: only the
                    # timing matters here.
                    pass

        # Warm-up pass, excluded from the timing below.
        _run_pipeline()

        t1 = time.time()
        for _ in range(test_interval):
            _run_pipeline()
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time
Example 4
0
    def detect_image(self, image):
        """Detect objects in `image` and draw boxes and class labels on it.

        Returns the (possibly annotated) PIL image; when nothing is
        detected, the image is returned unchanged.
        """
        image_shape = np.array(np.shape(image)[0:2])
        #---------------------------------------------------------#
        #   Letterbox: resize without distortion, pad with gray bars.
        #---------------------------------------------------------#
        crop_img = letterbox_image(image,
                                   [self.input_shape[0], self.input_shape[1]])
        #----------------------------------------------------------------------------------#
        #   RGB -> BGR: the original centernet_hourglass weights were trained on BGR images.
        #----------------------------------------------------------------------------------#
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
        #-----------------------------------------------------------#
        #   Preprocess / normalize; resulting shape is [1, H, W, C].
        #-----------------------------------------------------------#
        photo = np.reshape(
            preprocess_image(photo),
            [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])

        preds = self.get_pred(photo).numpy()
        #-------------------------------------------------------#
        #   Locating centers is crucial for CenterNet. Large objects
        #   carry a lot of local detail, so their center is ambiguous
        #   and the max-pooling-based NMS cannot remove duplicate local
        #   boxes; hence this extra box-level NMS. In practice it
        #   matters little for the hourglass backbone, a lot for resnet.
        #-------------------------------------------------------#
        if self.nms:
            preds = np.array(nms(preds, self.nms_threhold))

        if len(preds[0]) <= 0:
            return image

        #-----------------------------------------------------------#
        #   Feature-map (stride-4) coords -> fractions of the input size.
        #-----------------------------------------------------------#
        preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)

        det_label = preds[0][:, -1]
        det_conf = preds[0][:, -2]
        det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[
            0][:, 1], preds[0][:, 2], preds[0][:, 3]
        #-----------------------------------------------------------#
        #   Keep only boxes whose score clears the confidence threshold.
        #-----------------------------------------------------------#
        top_indices = [
            i for i, conf in enumerate(det_conf) if conf >= self.confidence
        ]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(
            det_xmin[top_indices],
            -1), np.expand_dims(det_ymin[top_indices], -1), np.expand_dims(
                det_xmax[top_indices],
                -1), np.expand_dims(det_ymax[top_indices], -1)

        #-----------------------------------------------------------#
        #   Map boxes back to the original image (remove the gray bars).
        #-----------------------------------------------------------#
        boxes = centernet_correct_boxes(
            top_ymin, top_xmin, top_ymax, top_xmax,
            np.array([self.input_shape[0], self.input_shape[1]]), image_shape)

        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(3e-2 * np.shape(image)[1] +
                                                0.5).astype('int32'))

        # Outline thickness scales with image size; at least 1 px.
        thickness = max(
            (np.shape(image)[0] + np.shape(image)[1]) // self.input_shape[0],
            1)

        for i, c in enumerate(top_label_indices):
            predicted_class = self.class_names[int(c)]
            score = top_conf[i]

            # Grow the box slightly, then round and clamp to image bounds.
            top, left, bottom, right = boxes[i]
            top = top - 5
            left = left - 5
            bottom = bottom + 5
            right = right + 5

            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(
                np.shape(image)[0],
                np.floor(bottom + 0.5).astype('int32'))
            right = min(
                np.shape(image)[1],
                np.floor(right + 0.5).astype('int32'))

            # Draw the box and its label.
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            label = label.encode('utf-8')
            print(label, top, left, bottom, right)

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # BUG FIX: the inner loop reused `i`, shadowing the detection
            # index from the enclosing enumerate; use a distinct name.
            for t in range(thickness):
                draw.rectangle([left + t, top + t, right - t, bottom - t],
                               outline=self.colors[int(c)])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[int(c)])
            draw.text(text_origin,
                      str(label, 'UTF-8'),
                      fill=(0, 0, 0),
                      font=font)
            del draw
        return image
Example 5
0
    def detect_image(self, image_id, image):
        """Write detections for `image_id` to ./input/detection-results/<id>.txt.

        Used for mAP evaluation: the confidence threshold is lowered to
        0.01 so low-scoring boxes are also recorded. Each output line is
        "<class> <score> <left> <top> <right> <bottom>".
        """
        f = open("./input/detection-results/" + image_id + ".txt", "w")
        try:
            self.confidence = 0.01
            self.nms_threhold = 0.5

            image_shape = np.array(np.shape(image)[0:2])
            #---------------------------------------------------------#
            #   Letterbox: resize without distortion, pad with gray bars.
            #---------------------------------------------------------#
            crop_img = letterbox_image(image,
                                       [self.input_shape[0], self.input_shape[1]])
            #----------------------------------------------------------------------------------#
            #   RGB -> BGR: the original centernet_hourglass weights were trained on BGR images.
            #----------------------------------------------------------------------------------#
            photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
            #-----------------------------------------------------------#
            #   Preprocess / normalize; resulting shape is [1, H, W, C].
            #-----------------------------------------------------------#
            photo = np.reshape(
                preprocess_image(photo),
                [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])

            preds = self.centernet.predict(photo)
            #--------------------------------------------------------------------------#
            #   Locating centers is crucial for CenterNet. Large objects carry
            #   a lot of local detail, so their center is ambiguous and the
            #   max-pooling-based NMS cannot remove duplicate local boxes;
            #   hence this extra box-level NMS. In practice it matters little
            #   for the hourglass backbone, a lot for resnet.
            #---------------------------------------------------------------------------#
            if self.nms:
                preds = np.array(nms(preds, self.nms_threhold))

            if len(preds[0]) <= 0:
                return

            #-----------------------------------------------------------#
            #   Feature-map (stride-4) coords -> fractions of the input size.
            #-----------------------------------------------------------#
            preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)

            #-----------------------------------------------------------#
            #   Keep only boxes whose score clears the confidence threshold.
            #-----------------------------------------------------------#
            det_label = preds[0][:, -1]
            det_conf = preds[0][:, -2]
            det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[
                0][:, 1], preds[0][:, 2], preds[0][:, 3]

            top_indices = [
                i for i, conf in enumerate(det_conf) if conf >= self.confidence
            ]
            top_conf = det_conf[top_indices]
            top_label_indices = det_label[top_indices].tolist()
            top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(
                det_xmin[top_indices],
                -1), np.expand_dims(det_ymin[top_indices], -1), np.expand_dims(
                    det_xmax[top_indices],
                    -1), np.expand_dims(det_ymax[top_indices], -1)

            #-----------------------------------------------------------#
            #   Map boxes back to the original image (remove the gray bars).
            #-----------------------------------------------------------#
            boxes = centernet_correct_boxes(
                top_ymin, top_xmin, top_ymax, top_xmax,
                np.array([self.input_shape[0], self.input_shape[1]]), image_shape)

            for i, c in enumerate(top_label_indices):
                predicted_class = self.class_names[int(c)]
                score = str(top_conf[i])

                top, left, bottom, right = boxes[i]
                f.write("%s %s %s %s %s %s\n" %
                        (predicted_class, score[:6], str(int(left)), str(
                            int(top)), str(int(right)), str(int(bottom))))
        finally:
            # BUG FIX: the original leaked the file handle on the early
            # "no detections" return; close it on every exit path.
            f.close()
        return
Example 6
0
    def detect_image(self, image):
        """Detect objects in `image` and draw boxes and class labels on it.

        Returns the (possibly annotated) PIL image; when nothing is
        detected, the image is returned unchanged.
        """
        image_shape = np.array(np.shape(image)[0:2])

        # Letterbox: resize without distortion, pad with gray bars.
        crop_img = letterbox_image(image,
                                   [self.image_size[0], self.image_size[1]])

        # RGB -> BGR: the original centernet_hourglass weights were
        # trained on BGR images.
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]

        # Preprocess / normalize, HWC -> CHW; shape [1, C, H, W].
        photo = np.reshape(
            np.transpose(preprocess_image(photo), (2, 0, 1)),
            [1, self.image_size[2], self.image_size[0], self.image_size[1]])

        with torch.no_grad():
            photo = np.asarray(photo)

            images = Variable(torch.from_numpy(photo).type(torch.FloatTensor))
            if self.cuda:
                images = images.cuda()

            outputs = self.centernet(images)
            if self.backbone == 'hourglass':
                outputs = [
                    outputs[-1]["hm"].sigmoid(), outputs[-1]["wh"],
                    outputs[-1]["reg"]
                ]
            outputs = decode_bbox(outputs[0], outputs[1], outputs[2],
                                  self.image_size, self.confidence, self.cuda)

        #-------------------------------------------------------#
        #   Locating centers is crucial for CenterNet. Large objects
        #   carry a lot of local detail, so their center is ambiguous
        #   and the max-pooling-based NMS cannot remove duplicate local
        #   boxes; hence this extra box-level NMS. In practice it
        #   matters little for the hourglass backbone, a lot for resnet.
        #-------------------------------------------------------#
        try:
            if self.nms:
                outputs = np.array(nms(outputs, self.nms_threhold))
        except Exception:
            # BUG FIX: was a bare `except:`; keep the best-effort NMS but
            # no longer swallow KeyboardInterrupt/SystemExit.
            pass

        output = outputs[0]
        if len(output) <= 0:
            return image

        batch_boxes, det_conf, det_label = output[:, :4], output[:, 4], output[:, 5]
        # Keep only boxes whose score clears the confidence threshold.
        det_xmin, det_ymin = batch_boxes[:, 0], batch_boxes[:, 1]
        det_xmax, det_ymax = batch_boxes[:, 2], batch_boxes[:, 3]

        top_indices = [
            i for i, conf in enumerate(det_conf) if conf >= self.confidence
        ]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_xmin = np.expand_dims(det_xmin[top_indices], -1)
        top_ymin = np.expand_dims(det_ymin[top_indices], -1)
        top_xmax = np.expand_dims(det_xmax[top_indices], -1)
        top_ymax = np.expand_dims(det_ymax[top_indices], -1)

        # Map boxes back to the original image (remove the gray bars).
        boxes = centernet_correct_boxes(
            top_ymin, top_xmin, top_ymax, top_xmax,
            np.array([self.image_size[0], self.image_size[1]]), image_shape)

        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(3e-2 * np.shape(image)[1] +
                                                0.5).astype('int32'))

        # BUG FIX: for images smaller than the network input the integer
        # division yields 0 and no outline was drawn; draw at least 1 px
        # (consistent with the sibling implementation).
        thickness = max(
            (np.shape(image)[0] + np.shape(image)[1]) // self.image_size[0], 1)

        for i, c in enumerate(top_label_indices):
            predicted_class = self.class_names[int(c)]
            score = top_conf[i]

            # Grow the box slightly, then round and clamp to image bounds.
            top, left, bottom, right = boxes[i]
            top = top - 5
            left = left - 5
            bottom = bottom + 5
            right = right + 5

            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(
                np.shape(image)[0],
                np.floor(bottom + 0.5).astype('int32'))
            right = min(
                np.shape(image)[1],
                np.floor(right + 0.5).astype('int32'))

            # Draw the box and its label.
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            label = label.encode('utf-8')
            print(label)

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # BUG FIX: the inner loop reused `i`, shadowing the detection
            # index from the enclosing enumerate; use a distinct name.
            for t in range(thickness):
                draw.rectangle([left + t, top + t, right - t, bottom - t],
                               outline=self.colors[int(c)])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[int(c)])
            draw.text(text_origin,
                      str(label, 'UTF-8'),
                      fill=(0, 0, 0),
                      font=font)
            del draw
        return image
Example 7
0
    def detect_image(self, image_id, image):
        """Write detections for `image_id` to ./input/detection-results/<id>.txt.

        Used for mAP evaluation: the confidence threshold is lowered to
        0.01 so low-scoring boxes are also recorded. Each output line is
        "<class> <score> <left> <top> <right> <bottom>".

        Returns the untouched image when there are no detections
        (original behavior), otherwise None.
        """
        f = open("./input/detection-results/" + image_id + ".txt", "w")
        try:
            self.confidence = 0.01
            self.nms_threhold = 0.5

            image_shape = np.array(np.shape(image)[0:2])

            # Letterbox: resize without distortion, pad with gray bars.
            crop_img = letterbox_image(image,
                                       [self.image_size[0], self.image_size[1]])

            # RGB -> BGR: the original centernet_hourglass weights were
            # trained on BGR images.
            photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]

            # Preprocess / normalize, HWC -> CHW; shape [1, C, H, W].
            photo = np.reshape(
                np.transpose(preprocess_image(photo), (2, 0, 1)),
                [1, self.image_size[2], self.image_size[0], self.image_size[1]])

            with torch.no_grad():
                photo = np.asarray(photo)

                images = Variable(torch.from_numpy(photo).type(torch.FloatTensor))
                if self.cuda:
                    images = images.cuda()

                outputs = self.centernet(images)
                if self.backbone == 'hourglass':
                    outputs = [
                        outputs[-1]["hm"].sigmoid(), outputs[-1]["wh"],
                        outputs[-1]["reg"]
                    ]
                outputs = decode_bbox(outputs[0], outputs[1], outputs[2],
                                      self.image_size, self.confidence, self.cuda)

            try:
                if self.nms:
                    outputs = np.array(nms(outputs, self.nms_threhold))
            except Exception:
                # BUG FIX: was a bare `except:`; keep the best-effort NMS
                # but no longer swallow KeyboardInterrupt/SystemExit.
                pass

            output = outputs[0]
            if len(output) <= 0:
                return image

            batch_boxes, det_conf, det_label = output[:, :4], output[:, 4], output[:, 5]

            # Keep only boxes whose score clears the confidence threshold.
            det_xmin, det_ymin = batch_boxes[:, 0], batch_boxes[:, 1]
            det_xmax, det_ymax = batch_boxes[:, 2], batch_boxes[:, 3]

            top_indices = [
                i for i, conf in enumerate(det_conf) if conf >= self.confidence
            ]
            top_conf = det_conf[top_indices]
            top_label_indices = det_label[top_indices].tolist()
            top_xmin = np.expand_dims(det_xmin[top_indices], -1)
            top_ymin = np.expand_dims(det_ymin[top_indices], -1)
            top_xmax = np.expand_dims(det_xmax[top_indices], -1)
            top_ymax = np.expand_dims(det_ymax[top_indices], -1)

            # Map boxes back to the original image (remove the gray bars).
            boxes = centernet_correct_boxes(
                top_ymin, top_xmin, top_ymax, top_xmax,
                np.array([self.image_size[0], self.image_size[1]]), image_shape)

            for i, c in enumerate(top_label_indices):
                predicted_class = self.class_names[int(c)]
                score = str(top_conf[i])

                top, left, bottom, right = boxes[i]
                f.write("%s %s %s %s %s %s\n" %
                        (predicted_class, score[:6], str(int(left)), str(
                            int(top)), str(int(right)), str(int(bottom))))
        finally:
            # BUG FIX: the original leaked the file handle on the early
            # "no detections" return; close it on every exit path.
            f.close()
        return
Example 8
0
    def detect_image(self, image_id, image):
        """Write detections for `image_id` to ./input/detection-results/<id>.txt.

        Used for mAP evaluation: the confidence threshold is lowered to
        0.01 so low-scoring boxes are also recorded. Each output line is
        "<class> <score> <left> <top> <right> <bottom>".

        Returns the untouched image when there are no detections
        (original behavior), otherwise None.
        """
        f = open("./input/detection-results/" + image_id + ".txt", "w")
        try:
            self.confidence = 0.01
            self.nms_threhold = 0.5

            image_shape = np.array(np.shape(image)[0:2])
            #---------------------------------------------------------#
            #   Letterbox: resize without distortion, pad with gray bars.
            #---------------------------------------------------------#
            crop_img = letterbox_image(image,
                                       [self.image_size[0], self.image_size[1]])
            #----------------------------------------------------------------------------------#
            #   RGB -> BGR: the original centernet_hourglass weights were trained on BGR images.
            #----------------------------------------------------------------------------------#
            photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
            #-----------------------------------------------------------#
            #   Preprocess / normalize, HWC -> CHW; shape [1, C, H, W].
            #-----------------------------------------------------------#
            photo = np.reshape(
                np.transpose(preprocess_image(photo), (2, 0, 1)),
                [1, self.image_size[2], self.image_size[0], self.image_size[1]])

            with torch.no_grad():
                images = Variable(
                    torch.from_numpy(np.asarray(photo)).type(torch.FloatTensor))
                if self.cuda:
                    images = images.cuda()

                outputs = self.centernet(images)
                if self.backbone == 'hourglass':
                    outputs = [
                        outputs[-1]["hm"].sigmoid(), outputs[-1]["wh"],
                        outputs[-1]["reg"]
                    ]
                #-----------------------------------------------------------#
                #   Decode the raw network outputs into boxes.
                #-----------------------------------------------------------#
                outputs = decode_bbox(outputs[0], outputs[1], outputs[2],
                                      self.image_size, self.confidence, self.cuda)

                #-------------------------------------------------------#
                #   Locating centers is crucial for CenterNet. Large
                #   objects carry a lot of local detail, so their center
                #   is ambiguous and the max-pooling-based NMS cannot
                #   remove duplicate local boxes; hence this extra
                #   box-level NMS. In practice it matters little for the
                #   hourglass backbone, a lot for resnet.
                #-------------------------------------------------------#
                try:
                    if self.nms:
                        outputs = np.array(nms(outputs, self.nms_threhold))
                except Exception:
                    # BUG FIX: was a bare `except:`; keep the best-effort
                    # NMS but no longer swallow KeyboardInterrupt/SystemExit.
                    pass

                output = outputs[0]
                if len(output) <= 0:
                    return image

                batch_boxes = output[:, :4]
                det_conf, det_label = output[:, 4], output[:, 5]
                det_xmin, det_ymin = batch_boxes[:, 0], batch_boxes[:, 1]
                det_xmax, det_ymax = batch_boxes[:, 2], batch_boxes[:, 3]
                #-----------------------------------------------------------#
                #   Keep only boxes whose score clears the confidence threshold.
                #-----------------------------------------------------------#
                top_indices = [
                    i for i, conf in enumerate(det_conf) if conf >= self.confidence
                ]
                top_conf = det_conf[top_indices]
                top_label_indices = det_label[top_indices].tolist()
                top_xmin = np.expand_dims(det_xmin[top_indices], -1)
                top_ymin = np.expand_dims(det_ymin[top_indices], -1)
                top_xmax = np.expand_dims(det_xmax[top_indices], -1)
                top_ymax = np.expand_dims(det_ymax[top_indices], -1)

                #-----------------------------------------------------------#
                #   Map boxes back to the original image (remove the gray bars).
                #-----------------------------------------------------------#
                boxes = centernet_correct_boxes(
                    top_ymin, top_xmin, top_ymax, top_xmax,
                    np.array([self.image_size[0], self.image_size[1]]),
                    image_shape)

            for i, c in enumerate(top_label_indices):
                predicted_class = self.class_names[int(c)]
                score = str(top_conf[i])

                top, left, bottom, right = boxes[i]
                f.write("%s %s %s %s %s %s\n" %
                        (predicted_class, score[:6], str(int(left)), str(
                            int(top)), str(int(right)), str(int(bottom))))
        finally:
            # BUG FIX: the original leaked the file handle on the early
            # "no detections" return; close it on every exit path.
            f.close()
        return
Esempio n. 9
0
    def detect_image(self, image_id, image, results):
        """Run detection on one image and append COCO-format entries to *results*.

        Parameters
        ----------
        image_id : identifier written into each result entry (coerced to int).
        image    : PIL image to run detection on.
        results  : list of result dicts; new detections are appended in place.

        Returns
        -------
        The same *results* list (also mutated in place).

        Side effects: lowers ``self.confidence`` / ``self.nms_threhold`` so the
        mAP evaluation sees (nearly) all candidate boxes.
        """
        self.confidence = 0.01
        self.nms_threhold = 0.5

        image_shape = np.array(np.shape(image)[0:2])
        # Letterbox resize: pad with gray bars so the image is not distorted.
        crop_img = letterbox_image(image,
                                   [self.input_shape[0], self.input_shape[1]])
        # RGB -> BGR, because the original centernet_hourglass weights were
        # trained on BGR images.
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]

        # Normalize and add the batch dimension (NHWC layout for Keras).
        photo = np.reshape(
            preprocess_image(photo),
            [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
        preds = self.centernet.predict(photo)

        if self.nms:
            preds = np.array(nms(preds, self.nms_threhold))

        if len(preds[0]) <= 0:
            return results

        # Coordinates come from the stride-4 feature map; rescale them to the
        # normalized letterboxed input resolution.
        preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)

        # Last column is the class label, second-to-last the confidence.
        det_label = preds[0][:, -1]
        det_conf = preds[0][:, -2]
        det_xmin, det_ymin, det_xmax, det_ymax = (preds[0][:, 0],
                                                  preds[0][:, 1],
                                                  preds[0][:, 2],
                                                  preds[0][:, 3])

        # Keep only boxes whose score reaches the confidence threshold.
        top_indices = [
            i for i, conf in enumerate(det_conf) if conf >= self.confidence
        ]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_xmin = np.expand_dims(det_xmin[top_indices], -1)
        top_ymin = np.expand_dims(det_ymin[top_indices], -1)
        top_xmax = np.expand_dims(det_xmax[top_indices], -1)
        top_ymax = np.expand_dims(det_ymax[top_indices], -1)

        # Undo the letterbox padding so boxes map back onto the original image.
        boxes = centernet_correct_boxes(
            top_ymin, top_xmin, top_ymax, top_xmax,
            np.array([self.input_shape[0], self.input_shape[1]]), image_shape)

        for i, c in enumerate(top_label_indices):
            top, left, bottom, right = boxes[i]

            # Round to the nearest pixel and clamp to the image bounds.
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            result = {}
            result["image_id"] = int(image_id)
            result["category_id"] = clsid2catid[c]
            # COCO bbox convention: [x, y, width, height].
            result["bbox"] = [
                float(left),
                float(top),
                float(right - left),
                float(bottom - top)
            ]
            result["score"] = float(top_conf[i])
            results.append(result)

        return results
Esempio n. 10
0
    def detect_image(self, image_id, image, results):
        """Run detection on one image and append COCO-format entries to *results*.

        Parameters
        ----------
        image_id : identifier written into each result entry (coerced to int).
        image    : PIL image to run detection on.
        results  : list of result dicts; new detections are appended in place.

        Returns
        -------
        The same *results* list (also mutated in place).

        Side effects: lowers ``self.confidence`` / ``self.nms_threhold`` so the
        mAP evaluation sees (nearly) all candidate boxes.
        """
        self.confidence = 0.01
        self.nms_threhold = 0.5

        image_shape = np.array(np.shape(image)[0:2])
        # Letterbox resize: pad with gray bars so the image is not distorted.
        crop_img = letterbox_image(image,
                                   [self.image_size[0], self.image_size[1]])
        # RGB -> BGR, because the original centernet_hourglass weights were
        # trained on BGR images.
        photo = np.array(crop_img, dtype=np.float32)[:, :, ::-1]
        # Normalize, HWC -> CHW, add the batch dimension (NCHW for PyTorch).
        photo = np.reshape(
            np.transpose(preprocess_image(photo), (2, 0, 1)),
            [1, self.image_size[2], self.image_size[0], self.image_size[1]])

        with torch.no_grad():
            images = Variable(
                torch.from_numpy(np.asarray(photo)).type(torch.FloatTensor))
            if self.cuda:
                images = images.cuda()

            outputs = self.centernet(images)
            if self.backbone == 'hourglass':
                # Hourglass returns per-stage outputs; decode only the last
                # stage's heatmap / width-height / offset heads.
                outputs = [
                    outputs[-1]["hm"].sigmoid(), outputs[-1]["wh"],
                    outputs[-1]["reg"]
                ]
            outputs = decode_bbox(outputs[0], outputs[1], outputs[2],
                                  self.image_size, self.confidence, self.cuda)

        # Optional extra NMS pass, kept best-effort: if it fails we fall back
        # to the decoded (un-suppressed) outputs rather than aborting.
        try:
            if self.nms:
                outputs = np.array(nms(outputs, self.nms_threhold))
        except Exception:  # was a bare except; never swallow KeyboardInterrupt
            pass

        output = outputs[0]
        if len(output) <= 0:
            # BUGFIX: this branch previously returned `image`, but callers
            # accumulate the `results` list (see the non-empty path below).
            return results

        batch_boxes = output[:, :4]
        det_conf = output[:, 4]
        det_label = output[:, 5]

        det_xmin, det_ymin, det_xmax, det_ymax = (batch_boxes[:, 0],
                                                  batch_boxes[:, 1],
                                                  batch_boxes[:, 2],
                                                  batch_boxes[:, 3])

        # Keep only boxes whose score reaches the confidence threshold.
        top_indices = [
            i for i, conf in enumerate(det_conf) if conf >= self.confidence
        ]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_xmin = np.expand_dims(det_xmin[top_indices], -1)
        top_ymin = np.expand_dims(det_ymin[top_indices], -1)
        top_xmax = np.expand_dims(det_xmax[top_indices], -1)
        top_ymax = np.expand_dims(det_ymax[top_indices], -1)

        # Undo the letterbox padding so boxes map back onto the original image.
        boxes = centernet_correct_boxes(
            top_ymin, top_xmin, top_ymax, top_xmax,
            np.array([self.image_size[0], self.image_size[1]]), image_shape)

        for i, c in enumerate(top_label_indices):
            top, left, bottom, right = boxes[i]

            # Round to the nearest pixel and clamp to the image bounds.
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            result = {}
            result["image_id"] = int(image_id)
            result["category_id"] = clsid2catid[c]
            # COCO bbox convention: [x, y, width, height].
            result["bbox"] = [
                float(left),
                float(top),
                float(right - left),
                float(bottom - top)
            ]
            result["score"] = float(top_conf[i])
            results.append(result)

        return results