def demo_detection(net, data_dir, image_name, CLASSES, gt_roidb):
    """Detect object classes in a color/thermal image pair using a 2-input net.

    Args:
        net: network handle consumed by ``im_detect_2in``.
        data_dir: directory containing 'color' and 'thermal' subdirectories.
        image_name: frame id string; files are named ``I<image_name>.jpg``.
        CLASSES: tuple of class names; index 0 is the background class.
        gt_roidb: ground-truth roidb, passed through to visualization.

    Returns:
        ndarray of NMS-surviving detections, one row per detection:
        ``[class_index, x1, y1, x2, y2, score]``; ``None`` if nothing kept.
    """
    # Load the demo image pair (aligned color + thermal frames).
    im1_file = os.path.join(data_dir, 'color', 'I' + image_name + '.jpg')
    im2_file = os.path.join(data_dir, 'thermal', 'I' + image_name + '.jpg')
    print(im1_file)
    print(im2_file)
    im1 = cv2.imread(im1_file)
    im2 = cv2.imread(im2_file)

    # Detect all object classes and regress object bounds.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect_2in(net, im1, im2)
    timer.toc()
    # Format the message first, then print it — the original
    # `print('...').format(...)` only parses as intended under Python 2.
    print('Detection took {:.3f}s for {:d} object proposals'.format(
        timer.total_time, boxes.shape[0]))

    NMS_THRESH = 0.3
    all_dets = None
    # Skip index 0 (background); cls_ind indexes into CLASSES/scores/boxes.
    for cls_ind, cls in enumerate(CLASSES[1:], start=1):
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # Prepend the class index so each detection row is self-describing.
        cls_inds = np.ones((len(keep), )) * cls_ind
        dets = np.hstack((cls_inds[:, np.newaxis], dets))
        all_dets = dets if all_dets is None else np.vstack((all_dets, dets))

    # NOTE(review): raw `boxes`/`scores` (pre-NMS) are visualized here, not
    # `all_dets` — presumably intentional; confirm against the visualizer.
    visual_detection_results(im1, boxes, scores, CLASSES, gt_roidb, threds=0.5)
    return all_dets
def detect(qf, qex, qdet, qtrack, qbox, qnum, f, isInited):
    """Detection worker: pull frame ids from `qdet`, push boxes to `qbox`.

    Runs until the sentinel string 'done' arrives on `qdet`, then forwards
    'done' to `qtrack` so the tracker can shut down too.

    Args:
        qf: unused here; kept for interface compatibility with the caller.
        qex: queue fed the zero-padded frame id for the extractor process.
        qdet: input queue of frame numbers (or the 'done' sentinel).
        qtrack: queue notified with 'done' when this worker exits.
        qbox: output queue; one list of (x, y, w, h) boxes per frame.
        qnum: unused here; kept for interface compatibility with the caller.
        f: shared flag cleared (set False) after each frame's boxes are queued.
        isInited: shared flag set True once the network has been loaded.
    """
    caffe.set_mode_gpu()
    caffe.set_device(0)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    data_dir = os.path.join(cfg.ROOT_DIR, 'data/demo_pedestrian/set07_V000')
    CLASSES = ('__background__', 'person')
    isInited.value = True

    # Loop-invariant thresholds, hoisted out of the per-frame loop.
    NMS_THRESH = 0.3
    CONF_THRESH = 0.5

    while True:
        im = qdet.get()
        if im == 'done':
            break
        im = format(im, '05d')
        qex.put(im)

        im1_file = os.path.join(data_dir, 'color', 'I' + im + '.jpg')
        im2_file = os.path.join(data_dir, 'thermal', 'I' + im + '.jpg')
        im1 = cv2.imread(im1_file)
        im2 = cv2.imread(im2_file)

        timer = Timer()
        timer.tic()
        scores, boxes = im_detect_2in(net, im1, im2)
        timer.toc()

        box = []
        # Skip index 0 (background); cls_ind indexes into scores/boxes.
        for cls_ind, cls in enumerate(CLASSES[1:], start=1):
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack(
                (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
            # Keep only confident detections.
            inds = np.where(dets[:, -1] > CONF_THRESH)[0]
            for i in inds:
                bbox = dets[i, :4]
                # Clamp the top-left corner into the image.
                if bbox[0] < 0:
                    bbox[0] = 0
                if bbox[1] < 0:
                    bbox[1] = 0
                # Convert (x1, y1, x2, y2) to (x, y, w, h) for the tracker.
                box.append((bbox[0], bbox[1],
                            bbox[2] - bbox[0], bbox[3] - bbox[1]))

        qbox.put(box)
        f.value = False

    print('out o det')
    qtrack.put('done')