def foo(tl, br):
    frame = 1
    while True:
        # Read image
        status, im = cap.read()
        # Optional crop around the previous bounding box, disabled here.
        # Crop from x, y, w, h -> 100, 200, 300, 400
        # NOTE: it is img[y:y+h, x:x+w] and *not* img[x:x+w, y:y+h]
        #if frame > 1:
        #    im = im[tl[1]-50:br[1]+50, tl[0]-50:br[0]+50]
        #    cv2.imshow("cropped", im)
        #    cv2.waitKey(0)
        if not status:
            break

        im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_draw = np.copy(im)

        # Update both trackers on the same frame, timing the combined step
        tic = time.time()
        CMT[0].process_frame(im_gray)
        CMT[1].process_frame(im_gray)
        toc = time.time()

        # Display results

        # Draw updated estimates
        if CMT[0].has_result:
            cv2.line(im_draw, CMT[0].tl, CMT[0].tr, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[0].tr, CMT[0].br, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[0].br, CMT[0].bl, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[0].bl, CMT[0].tl, (255, 0, 0), 4)

        if CMT[1].has_result:
            cv2.line(im_draw, CMT[1].tl, CMT[1].tr, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[1].tr, CMT[1].br, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[1].br, CMT[1].bl, (255, 0, 0), 4)
            cv2.line(im_draw, CMT[1].bl, CMT[1].tl, (255, 0, 0), 4)

        util.draw_keypoints(CMT[0].tracked_keypoints, im_draw, (255, 255, 255))
        # this is from simplescale
        util.draw_keypoints(CMT[0].votes[:, :2], im_draw)  # blue
        util.draw_keypoints(CMT[0].outliers[:, :2], im_draw, (0, 0, 255))
        util.draw_keypoints(CMT[1].tracked_keypoints, im_draw, (255, 255, 255))
        util.draw_keypoints(CMT[1].votes[:, :2], im_draw)  # blue
        util.draw_keypoints(CMT[1].outliers[:, :2], im_draw, (0, 0, 255))

        if args.output is not None:
            # Original image
            cv2.imwrite('{0}/input_{1:08d}.png'.format(args.output, frame), im)
            # Output image
            cv2.imwrite('{0}/output_{1:08d}.png'.format(args.output, frame), im_draw)

            # Keypoints
            with open('{0}/keypoints_{1:08d}.csv'.format(args.output, frame), 'w') as f:
                f.write('x y\n')
                np.savetxt(f, CMT[0].tracked_keypoints[:, :2], fmt='%.2f')
                np.savetxt(f, CMT[1].tracked_keypoints[:, :2], fmt='%.2f')

            # Outliers
            with open('{0}/outliers_{1:08d}.csv'.format(args.output, frame), 'w') as f:
                f.write('x y\n')
                np.savetxt(f, CMT[0].outliers, fmt='%.2f')
                np.savetxt(f, CMT[1].outliers, fmt='%.2f')

            # Votes
            with open('{0}/votes_{1:08d}.csv'.format(args.output, frame), 'w') as f:
                f.write('x y\n')
                np.savetxt(f, CMT[0].votes, fmt='%.2f')
                np.savetxt(f, CMT[1].votes, fmt='%.2f')

            # Bounding box
            with open('{0}/bbox_{1:08d}.csv'.format(args.output, frame), 'w') as f:
                f.write('x y\n')
                # The duplicate tl entry is not a mistake: it closes the
                # polygon when the points are used as a drawing instruction
                np.savetxt(f, np.array((CMT[0].tl, CMT[0].tr, CMT[0].br, CMT[0].bl, CMT[0].tl)), fmt='%.2f')
                np.savetxt(f, np.array((CMT[1].tl, CMT[1].tr, CMT[1].br, CMT[1].bl, CMT[1].tl)), fmt='%.2f')

        if not args.quiet:
            cv2.imshow('main', im_draw)

            # Check key input
            k = cv2.waitKey(pause_time)
            key = chr(k & 255)
            if key == 'q':
                break
            if key == 'd':
                import ipdb
                ipdb.set_trace()

        # Remember image
        im_prev = im_gray

        # Advance frame number
        frame += 1

        print('{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms'.format(
            CMT[0].center[0], CMT[0].center[1], CMT[0].scale_estimate,
            CMT[0].active_keypoints.shape[0], 1000 * (toc - tic), frame))
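`foo` reads several module-level names (`cap`, `CMT`, `args`, `pause_time`) that the snippet does not show. A minimal sketch of that state, assuming two targets tracked by two independent CMT instances; the video path and bounding boxes are hypothetical placeholders:

import argparse
import time

import cv2
import numpy as np

import CMT as cmt_module
import util

# Two independent trackers, one per target
CMT = [cmt_module.CMT(), cmt_module.CMT()]

args = argparse.Namespace(output=None, quiet=False)  # stand-in for parsed CLI args
pause_time = 10

cap = cv2.VideoCapture('input.avi')  # placeholder path
status, im0 = cap.read()
im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)

# Initialise each tracker with its own top-left/bottom-right corners (hypothetical boxes)
CMT[0].initialise(im_gray0, (100, 100), (200, 200))
CMT[1].initialise(im_gray0, (300, 150), (400, 250))

foo((100, 100), (200, 200))  # tl/br of the first target, used by the disabled crop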
while True:
    # Assumed frame grab: the original fragment starts mid-loop
    status, frame = cap.read()
    if not status:
        break
    im_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    CMT.process_frame(im_gray)

    # Draw updated estimate
    if CMT.has_result:
        # Draw the center of the object (integer division keeps pixel coordinates)
        center = ((CMT.tl[0] + CMT.br[0]) // 2, (CMT.tl[1] + CMT.br[1]) // 2)
        (xcenter, ycenter) = center
        cv2.line(frame, (xcenter, ycenter - 15), (xcenter, ycenter + 15), (0, 0, 255), 3)
        cv2.line(frame, (xcenter - 15, ycenter), (xcenter + 15, ycenter), (0, 0, 255), 3)
        cv2.circle(frame, center, 12, (0, 0, 255), 3)

        # Draw a rectangle around the object
        cv2.line(frame, CMT.tl, CMT.tr, (255, 0, 0), 4)
        cv2.line(frame, CMT.tr, CMT.br, (255, 0, 0), 4)
        cv2.line(frame, CMT.br, CMT.bl, (255, 0, 0), 4)
        cv2.line(frame, CMT.bl, CMT.tl, (255, 0, 0), 4)

        # Draw moving keypoints
        util.draw_keypoints(CMT.tracked_keypoints, frame, (255, 255, 255))
        # this is from simplescale
        util.draw_keypoints(CMT.votes[:, :2], frame)  # blue
        util.draw_keypoints(CMT.outliers[:, :2], frame, (0, 0, 255))

    cv2.imshow('frame', frame)
    if cv2.waitKey(FRAME_TIME) == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
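The loop above assumes an open capture device and an already-initialised tracker. A minimal sketch of that setup, with `FRAME_TIME` as an assumed per-frame delay in milliseconds:

import cv2
import numpy as np

import CMT
import util

FRAME_TIME = 30  # assumed ~33 fps display rate

CMT = CMT.CMT()

cap = cv2.VideoCapture(0)  # placeholder: default camera
status, im0 = cap.read()
im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)

# Let the user draw the initial bounding box, as the other examples do
(tl, br) = util.get_rect(np.copy(im0))
CMT.initialise(im_gray0, tl, br)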
tic = time.time()
CMT.process_frame(im_gray)
toc = time.time()

# Display results

# Draw updated estimate
if CMT.has_result:
    cv2.line(im_draw, CMT.tl, CMT.tr, (255, 0, 0), 4)
    cv2.line(im_draw, CMT.tr, CMT.br, (255, 0, 0), 4)
    cv2.line(im_draw, CMT.br, CMT.bl, (255, 0, 0), 4)
    cv2.line(im_draw, CMT.bl, CMT.tl, (255, 0, 0), 4)

util.draw_keypoints(CMT.tracked_keypoints, im_draw, (255, 255, 255))
# this is from simplescale
util.draw_keypoints(CMT.votes[:, :2], im_draw)  # blue
util.draw_keypoints(CMT.outliers[:, :2], im_draw, (0, 0, 255))

if args.output is not None:
    # Original image
    cv2.imwrite('{0}/input_{1:08d}.png'.format(args.output, frame), im)
    # Output image
    cv2.imwrite('{0}/output_{1:08d}.png'.format(args.output, frame), im_draw)

    # Keypoints
    with open('{0}/keypoints_{1:08d}.csv'.format(args.output, frame), 'w') as f:
        f.write('x y\n')
        np.savetxt(f, CMT.tracked_keypoints[:, :2], fmt='%.2f')
def callback(self, data):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return  # without a valid image there is nothing to track

    im_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    im_draw = np.copy(cv_image)

    # Update all three trackers on the same frame
    tic = time.time()
    self.CRT.process_frame(im_gray)
    self.CNT.process_frame(im_gray)
    self.CST.process_frame(im_gray)
    toc = time.time()

    # Display results

    # Draw updated estimates, one color per tracker
    if self.CRT.has_result:
        cv2.line(im_draw, self.CRT.tl, self.CRT.tr, (255, 0, 0), 4)
        cv2.line(im_draw, self.CRT.tr, self.CRT.br, (255, 0, 0), 4)
        cv2.line(im_draw, self.CRT.br, self.CRT.bl, (255, 0, 0), 4)
        cv2.line(im_draw, self.CRT.bl, self.CRT.tl, (255, 0, 0), 4)

    if self.CNT.has_result:
        cv2.line(im_draw, self.CNT.tl, self.CNT.tr, (255, 255, 0), 4)
        cv2.line(im_draw, self.CNT.tr, self.CNT.br, (255, 255, 0), 4)
        cv2.line(im_draw, self.CNT.br, self.CNT.bl, (255, 255, 0), 4)
        cv2.line(im_draw, self.CNT.bl, self.CNT.tl, (255, 255, 0), 4)

    if self.CST.has_result:
        cv2.line(im_draw, self.CST.tl, self.CST.tr, (0, 255, 0), 4)
        cv2.line(im_draw, self.CST.tr, self.CST.br, (0, 255, 0), 4)
        cv2.line(im_draw, self.CST.br, self.CST.bl, (0, 255, 0), 4)
        cv2.line(im_draw, self.CST.bl, self.CST.tl, (0, 255, 0), 4)

    util.draw_keypoints(self.CRT.tracked_keypoints, im_draw, (255, 255, 255))
    # this is from simplescale
    util.draw_keypoints(self.CRT.votes[:, :2], im_draw)  # blue
    util.draw_keypoints(self.CRT.outliers[:, :2], im_draw, (0, 0, 255))

    cv2.imshow('main', im_draw)
    cv2.waitKey(3)  # needed so the HighGUI window actually refreshes

    # Key handling is disabled here: a ROS callback cannot break out of
    # the subscriber loop the way the standalone script does
    #k = cv2.waitKey(self.pause_time)
    #key = chr(k & 255)
    #if key == 'q':
    #    break
    #if key == 'd':
    #    import ipdb; ipdb.set_trace()

    # Remember image
    im_prev = im_gray

    # Advance frame number
    self.frame += 1

    print('{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms'
          .format(self.CRT.center[0], self.CRT.center[1], self.CRT.scale_estimate,
                  self.CRT.active_keypoints.shape[0], 1000 * (toc - tic), self.frame))

    try:
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
        print(e)
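A rough sketch of the node class this callback might belong to, assuming the three CMT instances (`CRT`, `CNT`, `CST`) are initialised elsewhere with their respective bounding boxes; the class name and topic names are illustrative, not from the source:

import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

import CMT


class TrackerNode(object):  # hypothetical class name
    def __init__(self):
        self.bridge = CvBridge()
        self.frame = 1
        # Assumed: each tracker is initialised on a first frame elsewhere
        self.CRT = CMT.CMT()
        self.CNT = CMT.CMT()
        self.CST = CMT.CMT()
        self.image_pub = rospy.Publisher('/tracker/image', Image, queue_size=1)
        self.image_sub = rospy.Subscriber('/camera/image_raw', Image, self.callback)


def main():
    rospy.init_node('cmt_tracker', anonymous=True)  # hypothetical node name
    TrackerNode()
    rospy.spin()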
def callback(self, data):
    global frame
    global started

    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return

    status = True
    im = cv_image

    # Preview phase: show incoming frames until any key is pressed, then
    # let the user select the initial bounding box and initialise CMT
    if started == 0:
        cv2.imshow('Preview', im)
        k = cv2.waitKey(10)
        if k != -1:
            im0 = cv_image
            im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
            im_draw = np.copy(cv_image)
            (tl, br) = util.get_rect(im_draw)
            print("using", tl, br, "as init bb")
            CMT.initialise(im_gray0, tl, br)
            frame = 1
            started = 1
            cv2.destroyAllWindows()

    # Tracking phase
    if started == 1:
        if not status:
            cv2.destroyAllWindows()
            sys.exit(1)

        im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_draw = np.copy(im)

        tic = time.time()
        CMT.process_frame(im_gray)
        toc = time.time()

        # Display results

        # Draw updated estimate
        if CMT.has_result:
            cv2.line(im_draw, CMT.tl, CMT.tr, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.tr, CMT.br, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.br, CMT.bl, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.bl, CMT.tl, (255, 0, 0), 4)

        util.draw_keypoints(CMT.tracked_keypoints, im_draw, (255, 255, 255))
        # this is from simplescale
        util.draw_keypoints(CMT.votes[:, :2], im_draw)  # blue
        util.draw_keypoints(CMT.outliers[:, :2], im_draw, (0, 0, 255))

        cv2.imshow('main', im_draw)

        # Check key input
        k = cv2.waitKey(pause_time)
        key = chr(k & 255)
        if key == 'q':
            cv2.destroyAllWindows()
            rospy.signal_shutdown("ROSPy Shutdown")
            sys.exit(1)
        if key == 'd':
            import ipdb
            ipdb.set_trace()

        # Remember image
        im_prev = im_gray

        # Advance frame number
        frame += 1

        print("{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms"
              .format(CMT.center[0], CMT.center[1], CMT.scale_estimate,
                      CMT.active_keypoints.shape[0], 1000 * (toc - tic), frame))

    cv2.imshow("Image window", cv_image)
    cv2.waitKey(3)

    try:
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError as e:
        print(e)
# -*- coding: utf-8 -*-
import os

import cv2

from body import Body
from util import draw_keypoints, draw_skeleton

if __name__ == '__main__':
    weight_path = '/home/tl/Desktop/pytorch-openpose (copy)/model/body_pose_model.pth'
    test_dir = '/media/zhuzhu/6684B82784B7F81F/debug/src'
    dest_dir = '/media/zhuzhu/6684B82784B7F81F/debug/dst'

    model = Body(weight_path)

    for img in os.listdir(test_dir):
        img_path = os.path.join(test_dir, img)
        ori_img = cv2.imread(img_path)  # B,G,R order
        candidate, subset = model(ori_img)
        canvas = draw_keypoints(ori_img, candidate, subset)
        canvas = draw_skeleton(canvas, candidate, subset)
        cv2.imwrite(os.path.join(dest_dir, img), canvas)
def run_CMT(input_path, skip_frames, bbox, SHOW_IMAGES=False, clip_name='main'):
    from numpy import array, nan

    import CMT
    import util

    CMT = CMT.CMT()
    CMT.estimate_scale = True
    CMT.estimate_rotation = False

    # Read video and seek past the skipped frames
    cap = cv2.VideoCapture(input_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, skip_frames)
    # something wrong with reading the shot it seems

    if not cap.isOpened():
        print('#Unable to open video input.')
        sys.exit(1)

    status, im0 = cap.read()
    im_gray0_ = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
    im_gray0 = cv2.equalizeHist(im_gray0_)
    im_draw = np.copy(im0)

    # Prepare initial bbox values
    bbox_values = [int(v) for v in bbox]
    bbox = array(bbox_values)

    # Convert to point representation, adding singleton dimension
    bbox = util.bb2pts(bbox[None, :])

    # Squeeze
    bbox = bbox[0, :]

    tl = bbox[:2]
    br = bbox[2:4]

    print('#using', tl, br, 'as init bb')

    CMT_TRACKS = []
    CMT.initialise(im_gray0, tl, br)

    frame = 1
    while True:
        # Read image
        status, im = cap.read()
        if not status:
            break

        # Histogram equalization before tracking, as for the first frame
        im_gray_ = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_gray = cv2.equalizeHist(im_gray_)
        im_draw = np.copy(im)

        tic = time.time()
        CMT.process_frame(im_gray)
        toc = time.time()

        # Display results

        # Draw updated estimate
        if CMT.has_result:
            cv2.line(im_draw, CMT.tl, CMT.tr, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.tr, CMT.br, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.br, CMT.bl, (255, 0, 0), 4)
            cv2.line(im_draw, CMT.bl, CMT.tl, (255, 0, 0), 4)
            CMT_rect = [CMT.tl, CMT.tr, CMT.bl, CMT.br]

        util.draw_keypoints(CMT.tracked_keypoints, im_draw, (255, 255, 255))
        # this is from simplescale
        util.draw_keypoints(CMT.votes[:, :2], im_draw)  # blue
        util.draw_keypoints(CMT.outliers[:, :2], im_draw, (0, 0, 255))

        if SHOW_IMAGES:
            cv2.imshow(clip_name, im_draw)

            # Check key input
            k = cv2.waitKey(10) & 0xff

        # Remember image
        im_prev = im_gray

        # Advance frame number
        frame += 1

        print_str = '{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms'.format(
            CMT.center[0], CMT.center[1], CMT.scale_estimate,
            CMT.active_keypoints.shape[0], 1000 * (toc - tic), frame)

        if CMT.has_result:
            CMT_TRACKS.append([1, CMT_rect])
        else:
            CMT_TRACKS.append([0, nan])

    return CMT_TRACKS
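A hedged usage sketch for `run_CMT`; the clip path and box are placeholders, and the (x, y, w, h) box format is an assumption about what `util.bb2pts` expects, since `run_CMT` converts the result to tl/br corners internally:

# Hypothetical clip path and (x, y, w, h) bounding box
tracks = run_CMT('shots/clip01.avi', skip_frames=0,
                 bbox=(180, 120, 96, 96), SHOW_IMAGES=True, clip_name='clip01')

for has_result, rect in tracks:
    if has_result:
        tl, tr, bl, br = rect  # corner order as stored in CMT_rect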