def test_model(self, opt, val_data, weight_path, out_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    res_all = []  # holds the prediction results of all frames
    res_file = os.path.join(out_path, 'val_det.txt')
    start_time = time.time()
    for f in range(len(val_data)):
        filepath = val_data[f]['filepath']
        frame_number = f + 1
        img = cv2.imread(filepath)
        x_in = bbox_process.format_img(img, opt)
        # Y holds the classification and regression predictions of every stage;
        # the 2-step model returns 4 outputs:
        # Y[0], Y[1] are the ALF 1st-step predictions, Y[2], Y[3] the 2nd-step ones;
        # Y[0] and Y[2] are cls_pred, Y[1] and Y[3] are reg_pred.
        Y = self.model_all.predict(x_in)
        proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
        # bbx: [x1, y1, x2, y2]
        bbx, scores = bbox_process.pred_det(proposals, Y[2], Y[3], opt, step=2)
        f_res = np.repeat(frame_number, len(bbx), axis=0).reshape((-1, 1))
        # convert [x1, y1, x2, y2] to [x1, y1, width, height]
        bbx[:, [2, 3]] -= bbx[:, [0, 1]]
        res_all += np.concatenate((f_res, bbx, scores), axis=-1).tolist()
    np.savetxt(res_file, np.array(res_all), fmt='%.4f')
    print('Test time: %.4f s' % (time.time() - start_time))
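# Usage note (hedged sketch): each row of the val_det.txt written above is
# [frame_number, x1, y1, width, height, score], since the boxes are converted
# from corners to width/height just before saving. Reading the file back could
# look like the following; the file path is an assumption for illustration.
#
#   import numpy as np
#   dets = np.loadtxt('output/valresults/val_det.txt')
#   for frame_id, x, y, w, h, score in dets:
#       print('frame %d: box (%.1f, %.1f, %.1f, %.1f) score %.3f'
#             % (int(frame_id), x, y, w, h, score))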
def test_model(self, opt, val_data, weight_path, out_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    res_all = []
    res_file = os.path.join(out_path, 'val_det.txt')
    start_time = time.time()
    for f in range(len(val_data)):
        filepath = val_data[f]['filepath']
        frame_number = f + 1
        img = cv2.imread(filepath)
        x_in = bbox_process.format_img(img, opt)
        # the 3-step model returns 6 outputs: (cls_pred, reg_pred) for each of the three steps
        Y = self.model_all.predict(x_in)
        proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
        proposals_2nd = bbox_process.pred_pp_2nd(proposals, Y[2], Y[3], opt)
        bbx, scores = bbox_process.pred_det(proposals_2nd, Y[4], Y[5], opt, step=3)
        f_res = np.repeat(frame_number, len(bbx), axis=0).reshape((-1, 1))
        # convert [x1, y1, x2, y2] to [x1, y1, width, height]
        bbx[:, [2, 3]] -= bbx[:, [0, 1]]
        res_all += np.concatenate((f_res, bbx, scores), axis=-1).tolist()
    np.savetxt(res_file, np.array(res_all), fmt='%.4f')
    print('Test time: %.4f s' % (time.time() - start_time))
def demo_onepic(self, opt, img_path, weight_path, output_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    img = cv2.imread(img_path)
    x_in = bbox_process.format_img(img, opt)
    Y = self.model_all.predict(x_in)
    proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
    bbx, scores = bbox_process.pred_det(proposals, Y[2], Y[3], opt, step=2)
    # draw the detected boxes on the input image and display it
    for ind in range(len(bbx)):
        (x1, y1, x2, y2) = bbx[ind, :]
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.imshow('det_result', img)
    cv2.waitKey(0)
def demo(self, opt, val_data, weight_path, out_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    for f in range(len(val_data)):
        img_name = os.path.join('data/examples/', val_data[f])
        if not img_name.lower().endswith(('.jpg', '.png')):
            continue
        print(img_name)
        img = cv2.imread(img_name)
        x_in = bbox_process.format_img(img, opt)
        Y = self.model_all.predict(x_in)
        proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
        bbx, scores = bbox_process.pred_det(proposals, Y[2], Y[3], opt, step=2)
        for ind in range(len(bbx)):
            (x1, y1, x2, y2) = bbx[ind, :]
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.imwrite(os.path.join(out_path, val_data[f]), img)
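# Usage note (hedged sketch): demo() expects val_data to be a list of file
# names under data/examples/ and writes the annotated images to out_path.
# 'detector', the weight file and the output directory below are hypothetical
# placeholders, not names defined in this file.
#
#   import os
#   val_data = sorted(os.listdir('data/examples/'))
#   detector.demo(opt, val_data, 'models/model_2step.hdf5', 'output/examples/')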
def test_model_with_input_image(self, img, opt, weight_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    x_in = bbox_process.format_img(img, opt)
    print('finished format_img.')
    Y = self.model_all.predict(x_in)
    print('finished model predict.')
    print('begin to gen proposals...')
    proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
    print('finished proposals generation.')
    print('begin to det box & score.')
    bbx, scores = bbox_process.pred_det(proposals, Y[2], Y[3], opt, step=2)
    # bbx[:, [2, 3]] -= bbx[:, [0, 1]]
    print('finished box & score det.')
    return bbx, scores
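# Usage note (hedged sketch): single-image inference with the returned boxes
# drawn manually, mirroring what demo_onepic() does. 'detector', the paths and
# the 0.5 confidence threshold are assumptions for illustration only.
#
#   import cv2
#   img = cv2.imread('data/examples/demo.jpg')
#   bbx, scores = detector.test_model_with_input_image(img, opt, 'models/model_2step.hdf5')
#   for box, s in zip(bbx, scores):
#       if float(s) > 0.5:
#           x1, y1, x2, y2 = box
#           cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
#   cv2.imwrite('det_result.jpg', img)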
def demo_video(self, opt, video_path, weight_path, output_path):
    self.model_all.load_weights(weight_path, by_name=True)
    print('load weights from {}'.format(weight_path))
    print(video_path)
    timeF = 3  # run detection on every 3rd frame
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError("Couldn't open webcam or video")
    codec = cv2.VideoWriter_fourcc(*'MJPG')
    size = (2048, 1024)
    out = cv2.VideoWriter(output_path, codec, 15.0, size)  # 15 frames per second
    c = 1
    dets = []
    while cap.isOpened():
        det = []
        ret, frame = cap.read()
        if frame is not None:
            if c % timeF == 0:
                # resize to the input size the model expects
                frame = cv2.resize(frame, (2048, 1024))
                # format conversion: subtract the mean
                x_in = bbox_process.format_img(frame, opt)
                Y = self.model_all.predict(x_in)
                proposals = bbox_process.pred_pp_1st(self.anchors, Y[0], Y[1], opt)
                bbx, scores = bbox_process.pred_det(proposals, Y[2], Y[3], opt, step=2)
                for ind in range(len(bbx)):
                    (x1, y1, x2, y2) = bbx[ind, :]
                    # optional size filter, currently disabled:
                    # if (x2 - x1 < 100) & (y2 - y1 < 350) & (x2 - x1 > 25) & (y2 - y1 > 150):
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    # det.append([x1, y1, x2, y2])
                dets.append(det)
                cv2.imshow('detection', frame)
                out.write(frame)
                k = cv2.waitKey(1)
                if k & 0xff == ord('q'):
                    break
            c = c + 1
        else:
            break
    # the per-frame detections stored in dets could be used to associate
    # detections across frames, e.g.:
    # links = createLinks(dets)
    # maxPath(dets, links)
    cap.release()
    cv2.destroyAllWindows()