def image_callback(self, data):
        self.show = self.convert_image(data)
        self.time_header = data.header

        if self.show is None: return

        # image = Image.fromarray(frame)
        step1 = time.time()
        frame_rgb = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(
            frame_rgb,
            (darknet.network_width(netMain), darknet.network_height(netMain)),
            interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())

        detections = darknet.detect_image(netMain,
                                          metaMain,
                                          darknet_image,
                                          thresh=0.25)
        #detections are already (x, y, w, h) in real pixels (not normalized coordinates)

        #boxs should arrive in a format similar to [[584, 268, 160, 316]]
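        # For reference: darknet.detect_image() (classic darknet.py Python wrapper
        # used with netMain/metaMain) returns a list of tuples of the form
        #   [(b'person', 0.93, (cx, cy, w, h)), ...]
        # i.e. class name as bytes, confidence, and a centre-based box on the
        # network input (values here are illustrative).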

        if detections is None: return
        ##            image = cvDrawBoxes(detections, frame_resized)
        ##            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        ##            cv2.imshow('Demo', image)
        ##            cv2.waitKey(1)

        detections = np.array(detections)
        detections = detections[np.where(detections[:, 0] == b'person')]
        if detections is None: return
        #print("detections",detections)

        if len(detections) == 0: return
        c = np.delete(detections, [0, 1], 1).squeeze()  #remove id and conf, only bbox
        #print("c",c)
        #r =  #resize to coords on original image to compensate for the frame_resized
        if len(c.shape) == 0:
            boxs = np.array([c.tolist()])
        else:
            boxs = np.array([list(item) for item in c])  #formating
        #print("boxs",boxs)


##            boxs = xyxy_to_xywh(transformed)#.astype(np.uint8)
##

        boxs[:, 2] = (boxs[:, 2] / yolo_filter_size) * width   #w
        boxs[:, 3] = (boxs[:, 3] / yolo_filter_size) * height  #h

        boxs[:, 0] = (boxs[:, 0] / yolo_filter_size) * width - boxs[:, 2] / 2   #x
        boxs[:, 1] = (boxs[:, 1] / yolo_filter_size) * height - boxs[:, 3] / 2  #y

        print("time for inference =>" + str(time.time() - step1))
        #print(darknet.network_width(netMain),darknet.network_height(netMain)) #608 #608
        # print("box_num",len(boxs))
        features = encoder(self.show, boxs)

        # Wrap each box as a deep_sort Detection (detector confidence score is set to 1.0 here).
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Draw the NMS-filtered detections (no tracker is run in this example).
        for det in detections:

            bbox = det.to_tlbr()
            try:
                cv2.rectangle(self.show, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            except ValueError:
                break

        self.posid_pub.publish(Int32(data=len(boxes)))

        self.track_pub.publish(self.bridge.cv2_to_imgmsg(self.show, "bgr8"))
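
# For reference, the rescaling block above maps darknet's centre-based boxes on
# the (yolo_filter_size x yolo_filter_size) network input back to top-left
# (x, y, w, h) boxes on the original (width x height) frame.  A minimal
# standalone sketch of the same math (scale_yolo_boxes is a hypothetical name):
import numpy as np  # already used as np throughout; repeated so the sketch is self-contained


def scale_yolo_boxes(boxs, yolo_filter_size, width, height):
    boxs = np.array(boxs, dtype=np.float64)  # work on a float copy
    boxs[:, 2] = boxs[:, 2] / yolo_filter_size * width    # w on the original frame
    boxs[:, 3] = boxs[:, 3] / yolo_filter_size * height   # h on the original frame
    boxs[:, 0] = boxs[:, 0] / yolo_filter_size * width - boxs[:, 2] / 2   # x: centre -> top-left
    boxs[:, 1] = boxs[:, 1] / yolo_filter_size * height - boxs[:, 3] / 2  # y: centre -> top-left
    return boxs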
######## Example 2
    def image_callback(self,data):
            self.show = self.convert_image(data)
            self.time_header = data.header

            if self.show is None: return
  
            # image = Image.fromarray(frame)
            step1 = time.time()
            frame_rgb = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)

            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())

            detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
            #detections are already (x, y, w, h) in real pixels (not normalized coordinates)

            #boxs should arrive in a format similar to [[584, 268, 160, 316]]

            if detections is None: return
##            image = cvDrawBoxes(detections, frame_resized)
##            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
##            cv2.imshow('Demo', image)
##            cv2.waitKey(1)

            
            detections = np.array(detections)
            detections = detections[ np.where( detections[:,0]==b'person' ) ]
            if detections is None: return
            #print("detections",detections)

            if len(detections)==0: return
            c = np.delete(detections,[0,1],1).squeeze() #remove id and conf, only bbox
            #print("c",c)
            #r =  #resize to coords on original image to compensate for the frame_resized
            if len(c.shape)==0:
                boxs = np.array( [ c.tolist() ] )
            else:   
                boxs = np.array([list(item) for item in c]) #formating
            #print("boxs",boxs)

            self.fps_count += 1
            self.frame_count += 1

            #boxs = xyxy_to_xywh(boxs)#.astype(np.uint8)
            boxs = xywh_to_xyxy(boxs)
            
            print(boxs)         

            boxs[:,2] = (boxs[:,2] /yolo_filter_size) * width   # scale to the original frame width
            boxs[:,3] = (boxs[:,3] /yolo_filter_size) * height  # scale to the original frame height

            boxs[:,0] = (boxs[:,0] /yolo_filter_size) * width   #- boxs[:,2]/2#x
            boxs[:,1] = (boxs[:,1] /yolo_filter_size) * height  #- boxs[:,3]/2#y
            
            print("time for inference =>"+str(time.time()-step1))
            #print(darknet.network_width(netMain),darknet.network_height(netMain)) #608 #608
            # print("box_num",len(boxs))
            
            for bbox in boxs:

                box_width  = bbox[2] - bbox[0]
                box_height = bbox[3] - bbox[1]

                bbox[0] = bbox[0] - int(box_width/2)
                bbox[1] = bbox[1] - int(box_height/2)
                
                self.posid_array = Path()
            
                try:
                    cv2.rectangle(self.show, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
                except ValueError:
                    break
                #cv2.putText(self.show, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)


            # Display the real-time FPS value
            if (time.time() - self.start_time) > self.fps_interval:
                # Frames counted during this interval; with a 1-second interval this equals the FPS
                self.realtime_fps = self.fps_count / (time.time() - self.start_time)
                self.fps_count = 0  # reset the frame counter
                self.start_time = time.time()
            fps_label = "FPS:"+str(self.realtime_fps)
            cv2.putText(self.show, fps_label, (width-160, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)


            # Display the current running time and total frame count
            if self.frame_count == 1:
                self.run_timer = time.time()
            run_time = time.time() - self.run_timer
            time_frame_label = "Frame:"+str(self.frame_count)
            self.posid_pub.publish(Int32(data=len(boxs)))
            self.track_pub.publish(self.bridge.cv2_to_imgmsg(self.show, "bgr8"))
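
# xywh_to_xyxy is not defined anywhere in these snippets.  A minimal sketch,
# written to match how the drawing loop above uses its output (that loop still
# shifts the first corner by half the box size afterwards), i.e. the plain
# (x, y, w, h) -> (x, y, x + w, y + h) conversion; adjust if your helper uses a
# centre-based convention instead:
def xywh_to_xyxy(boxes):
    out = np.array(boxes, dtype=np.float64)  # float copy
    out[:, 2] = out[:, 0] + out[:, 2]  # x2 = x + w
    out[:, 3] = out[:, 1] + out[:, 3]  # y2 = y + h
    return out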
######## FAST YOLO wrapper
import os

import darknet  # compiled darknet Python wrapper (darknet.py) used throughout this section
cwd = os.path.dirname(__file__)
print(cwd)
configPath = cwd + "/external_yolov3/darknet/cfg/yolov3-spp.cfg"
#weightPath = cwd+"/external_yolov3/darknet/yolov3-spp.weights"
weightPath = cwd + "/external_yolov3/darknet/yolov3-spp-thermal.weights"
metaPath = cwd + "/external_yolov3/darknet/cfg/coco_accel.data"

netMain = darknet.load_net_custom(configPath.encode("ascii"),
                                  weightPath.encode("ascii"), 0,
                                  1)  # batch size = 1

metaMain = darknet.load_meta(metaPath.encode("ascii"))

darknet_image = darknet.make_image(darknet.network_width(netMain),
                                   darknet.network_height(netMain), 3)
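
# The image_callback snippets also reference a few module-level values that
# never appear in this listing; plausible placeholders (hypothetical values,
# adjust to your camera and config):
yolo_filter_size = darknet.network_width(netMain)  # square network input, e.g. 608
width, height = 640, 480                           # original camera frame size
nms_max_overlap = 1.0                              # deep_sort NMS overlap threshold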


###########
def convertBack(x, y, w, h):
    xmin = int(round(x - (w / 2)))
    xmax = int(round(x + (w / 2)))
    ymin = int(round(y - (h / 2)))
    ymax = int(round(y + (h / 2)))
    return xmin, ymin, xmax, ymax
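
# Example: a centre-based box (cx=100, cy=80, w=50, h=20) becomes the corner
# box (xmin, ymin, xmax, ymax) = (75, 70, 125, 90):
#   convertBack(100, 80, 50, 20)  ->  (75, 70, 125, 90)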


def cvDrawBoxes(detections, img):
    for detection in detections:
        x, y, w, h = detection[2][0], detection[2][1], detection[2][2], detection[2][3]
        xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w), float(h))
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)
    return img
######## Example 4
    def image_callback(self,data):
            self.show = self.convert_image(data)
            self.time_header = data.header

            if self.show is None: return
  
            # image = Image.fromarray(frame)
            step1 = time.time()
            frame_rgb = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb,
                                   (darknet.network_width(netMain),
                                    darknet.network_height(netMain)),
                                   interpolation=cv2.INTER_LINEAR)

            darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())

            detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
            #detections are already (x, y, w, h) in real pixels (not normalized coordinates)

            #boxs should arrive in a format similar to [[584, 268, 160, 316]]

            if detections is None: return
##            image = cvDrawBoxes(detections, frame_resized)
##            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
##            cv2.imshow('Demo', image)
##            cv2.waitKey(1)

            
            detections = np.array(detections)
            detections = detections[ np.where( detections[:,0]==b'person' ) ]
            if detections is None: return
            #print("detections",detections)

            if len(detections)==0: return
            c = np.delete(detections,[0,1],1).squeeze() #remove id and conf, only bbox
            #print("c",c)
            #r =  #resize to coords on original image to compensate for the frame_resized
            if len(c.shape)==0:
                boxs = np.array( [ c.tolist() ] )
            else:   
                boxs = np.array([list(item) for item in c]) #formating
            #print("boxs",boxs)

            self.fps_count += 1
            self.frame_count += 1

##            boxs = xyxy_to_xywh(transformed)#.astype(np.uint8)
##
            
            boxs[:,2] = (boxs[:,2] /yolo_filter_size) * width  #w
            boxs[:,3] = (boxs[:,3] /yolo_filter_size) * height #h

            boxs[:,0] = (boxs[:,0] /yolo_filter_size) * width   - boxs[:,2]/2#x
            boxs[:,1] = (boxs[:,1] /yolo_filter_size) * height  - boxs[:,3]/2#y
            
            print("time for inference =>"+str(time.time()-step1))
            #print(darknet.network_width(netMain),darknet.network_height(netMain)) #608 #608
            # print("box_num",len(boxs))
            features = encoder(self.show,boxs)
            
            # Wrap each box as a deep_sort Detection (detector confidence score is set to 1.0 here).
            detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
            
            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Run the deep_sort tracker on the new detections so tracker.tracks
            # below reflects the current frame
            tracker.predict()
            tracker.update(detections)
            
            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                
                self.posid_array = Path()
                
                bbox = track.to_tlbr()
                try:
                    cv2.rectangle(self.show, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
                except ValueError:
                    break
                cv2.putText(self.show, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)


            # Display the real-time FPS value
            if (time.time() - self.start_time) > self.fps_interval:
                # Frames counted during this interval; with a 1-second interval this equals the FPS
                self.realtime_fps = self.fps_count / (time.time() - self.start_time)
                self.fps_count = 0  # reset the frame counter
                self.start_time = time.time()
            fps_label = 'FPS:{0:.2f}'.format(self.realtime_fps)
            cv2.putText(self.show, fps_label, (width-160, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)


            # Display the current running time and total frame count
            if self.frame_count == 1:
                self.run_timer = time.time()
            run_time = time.time() - self.run_timer
            time_frame_label = '[Time:{0:.2f} | Frame:{1}]'.format(run_time, self.frame_count)
            cv2.putText(self.show, time_frame_label, (5, height-15), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            
            self.posid_pub.publish(Int32(data=len(boxes)))
            self.track_pub.publish(self.bridge.cv2_to_imgmsg(self.show, "bgr8"))
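
######## deep_sort setup (reference sketch)
# The callbacks above use encoder, Detection, preprocessing and tracker without
# defining them.  A minimal sketch of how these are typically created with the
# deep_sort package in YOLOv3 + deep_sort demos (the model path is hypothetical):
from deep_sort import nn_matching, preprocessing
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet

model_filename = "model_data/mars-small128.pb"  # hypothetical appearance-descriptor model path
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", matching_threshold=0.3, budget=None)
tracker = Tracker(metric)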