def test_net(visualise, cache_scoremaps, development):
    """Evaluate the multi-person pose model over the dataset.

    Runs the CNN part detector (or loads cached scoremaps), groups part
    detections into per-person poses via the multicut graph, optionally
    visualizes the result, and — when ``cfg.use_gt_segm`` is set — dumps
    COCO-format predictions to ``predictions_with_segm.json``.

    Args:
        visualise: show per-image visualizations when truthy.
        cache_scoremaps: write CNN scoremaps to ``cfg.scoremap_dir`` as
            .mat files instead of running the grouping stage.
        development: if truthy, process at most 10 images.
    """
    logging.basicConfig(level=logging.INFO)

    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(False)

    sm = SpatialModel(cfg)
    sm.load()

    draw_multi = PersonDraw()

    # Scoremaps may be precomputed on disk; only build the TF graph when
    # we actually have to run the CNN.
    from_cache = "cached_scoremaps" in cfg
    sess = None
    if not from_cache:
        sess, inputs, outputs = setup_pose_prediction(cfg)

    if cache_scoremaps:
        out_dir = cfg.scoremap_dir
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    pairwise_stats = dataset.pairwise_stats
    num_images = dataset.num_images if not development else min(10, dataset.num_images)
    coco_results = []

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images - 1))

        batch = dataset.next_batch()
        cache_name = "{}.mat".format(batch[Batch.data_item].coco_id)

        if not from_cache:
            outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
            scmap, locref, pairwise_diff = extract_cnn_output(
                outputs_np, cfg, pairwise_stats)

            if cache_scoremaps:
                if visualise:
                    img = np.squeeze(batch[Batch.inputs]).astype('uint8')
                    pose = argmax_pose_predict(scmap, locref, cfg.stride)
                    arrows = argmax_arrows_predict(scmap, locref,
                                                   pairwise_diff, cfg.stride)
                    visualize.show_arrows(cfg, img, pose, arrows)
                    visualize.waitforbuttonpress()
                    continue

                out_fn = os.path.join(out_dir, cache_name)
                # Renamed from `dict` — the original shadowed the builtin.
                mat_dict = {
                    'scoremaps': scmap.astype('float32'),
                    'locreg_pred': locref.astype('float32'),
                    'pairwise_diff': pairwise_diff.astype('float32')
                }
                scipy.io.savemat(out_fn, mdict=mat_dict)
                continue
        else:
            # cache_name = '1.mat'
            full_fn = os.path.join(cfg.cached_scoremaps, cache_name)
            mlab = scipy.io.loadmat(full_fn)
            scmap = mlab["scoremaps"]
            locref = mlab["locreg_pred"]
            pairwise_diff = mlab["pairwise_diff"]

        # Group part detections into per-person poses (multicut graph).
        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(
            sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array,
                                                     pos_array)

        if visualise:
            img = np.squeeze(batch[Batch.inputs]).astype('uint8')
            # visualize.show_heatmaps(cfg, img, scmap, pose)

            # visualize part detections after NMS (disabled):
            # visim_dets = visualize_detections(cfg, img, detections)
            # plt.imshow(visim_dets)
            # plt.show()
            # visualize.waitforbuttonpress()

            visim_multi = img.copy()
            draw_multi.draw(visim_multi, dataset, person_conf_multi)
            plt.imshow(visim_multi)
            plt.show()
            visualize.waitforbuttonpress()

        if cfg.use_gt_segm:
            coco_img_results = pose_predict_with_gt_segm(
                scmap, locref, cfg.stride, batch[Batch.data_item].gt_segm,
                batch[Batch.data_item].coco_id)
            coco_results += coco_img_results
            if len(coco_img_results):
                dataset.visualize_coco(coco_img_results,
                                       batch[Batch.data_item].visibilities)

    if cfg.use_gt_segm:
        with open('predictions_with_segm.json', 'w') as outfile:
            json.dump(coco_results, outfile)

    # Bug fix: `sess` exists only when the CNN was built (not from_cache);
    # the original called sess.close() unconditionally → NameError.
    if sess is not None:
        sess.close()
# Load and setup CNN part detector sess, inputs, outputs = predict.setup_pose_prediction(cfg) # Read image from file file_name = "demo/image_multi.png" image = imageio.imread(file_name, mode='RGB') image_batch = data_to_input(image) # Compute prediction with the CNN outputs_np = sess.run(outputs, feed_dict={inputs: image_batch}) scmap, locref, pairwise_diff = predict.extract_cnn_output( outputs_np, cfg, dataset.pairwise_stats) detections = extract_detections(cfg, scmap, locref, pairwise_diff) unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph( sm, detections) person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array) img = np.copy(image) visim_multi = img.copy() fig = plt.imshow(visim_multi) draw_multi.draw(visim_multi, dataset, person_conf_multi) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.show() visualize.waitforbuttonpress()
sm, detections) unLab2, pos_array2, unary_array2, pwidx_array2, pw_array2 = eval_graph( sm, detections2) person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array) person_conf_multi2 = get_person_conf_multicut(sm, unLab2, unary_array2, pos_array2) img = np.copy(image) img2 = np.copy(image2) #coor = PersonDraw.draw() visim_multi = img.copy() visim_multi2 = img2.copy() co1 = draw_multi.draw(visim_multi, dataset, person_conf_multi) co2 = draw_multi.draw(visim_multi2, dataset, person_conf_multi2) cv2.imshow('frame', visim_multi2) cv2.imshow('frame', visim_multi) cv2.destroyAllWindows() #plt.show() visualize.waitforbuttonpress() #print("this is draw : ", co1) """ qwr = np.zeros((1920,1080,3), np.uint8) cv2.line(qwr, co1[5][0], co1[5][1],(255,0,0),3) cv2.line(qwr, co1[7][0], co1[7][1],(255,0,0),3) cv2.line(qwr, co1[6][0], co1[6][1],(255,0,0),3)
draw_multi = PersonDraw() # Load and setup CNN part detector sess, inputs, outputs = predict.setup_pose_prediction(cfg) # Read image from file file_name = "demo/image_multi.png" image = imread(file_name, mode='RGB') image_batch = data_to_input(image) # Compute prediction with the CNN outputs_np = sess.run(outputs, feed_dict={inputs: image_batch}) scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats) detections = extract_detections(cfg, scmap, locref, pairwise_diff) unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections) person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array) img = np.copy(image) visim_multi = img.copy() fig = plt.imshow(visim_multi) draw_multi.draw(visim_multi, dataset, person_conf_multi) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.show() visualize.waitforbuttonpress()
def main():
    """Capture webcam frames, estimate multi-person poses on them, render
    the detected limbs as a stick figure and compare it against reference
    frames stored in the local ``stick/`` directory.

    NOTE(review): relies on module-level imports (cv2, tf, predict,
    load_config, create_dataset, SpatialModel, PersonDraw, data_to_input,
    compare_images, visualize, plt, np) — confirm against the file header.
    """
    start_time = time.time()
    print("main hai")

    tf.reset_default_graph()
    cfg = load_config("demo/pose_cfg_multi.yaml")
    dataset = create_dataset(cfg)

    sm = SpatialModel(cfg)
    sm.load()
    draw_multi = PersonDraw()

    # Load and setup CNN part detector.
    sess, inputs, outputs = predict.setup_pose_prediction(cfg)

    k = 0          # processed-frame index, also selects stick/frame<k>.jpg
    frame_no = 0   # webcam frame counter (was `i`, which was never advanced)
    sse = 0
    mse = 0

    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        # Process only every 20th loop iteration.
        # NOTE(review): skipped iterations do not grab a frame — consider
        # cap.grab() on them to actually drop frames instead of busy-looping.
        if frame_no % 20 == 0:
            ret, orig_frame = cap.read()
            if not ret:
                break

            # Downscale for speed before feeding the network.
            frame = cv2.resize(orig_frame, (0, 0), fx=0.30, fy=0.30)
            image = frame

            image_batch = data_to_input(frame)

            # Compute prediction with the CNN.
            outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
            scmap, locref, pairwise_diff = predict.extract_cnn_output(
                outputs_np, cfg, dataset.pairwise_stats)

            # Group part detections into per-person poses (multicut graph).
            detections = extract_detections(cfg, scmap, locref, pairwise_diff)
            unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(
                sm, detections)
            person_conf_multi = get_person_conf_multicut(
                sm, unLab, unary_array, pos_array)

            img = np.copy(image)
            visim_multi = img.copy()
            co1 = draw_multi.draw(visim_multi, dataset, person_conf_multi)

            plt.imshow(visim_multi)
            plt.show()
            visualize.waitforbuttonpress()
            # print("this is draw : ", co1)

            if k == 1:
                # Render the detected limb endpoints as a stick figure.
                qwr = np.zeros((1920, 1080, 3), np.uint8)
                for limb in (5, 7, 6, 4, 9, 11, 8, 10):
                    cv2.line(qwr, co1[limb][0], co1[limb][1], (255, 0, 0), 3)
                cv2.imshow('r', qwr)

                # Bug fix: the reference frame must be *loaded* from disk —
                # the original passed the path string itself to cv2.cvtColor,
                # which raises a TypeError.
                ref_path = "stick/frame" + str(k) + ".jpg"
                qw1 = cv2.cvtColor(qwr, cv2.COLOR_BGR2GRAY)
                qw2 = cv2.cvtColor(cv2.imread(ref_path), cv2.COLOR_BGR2GRAY)

                fig = plt.figure("Images")
                images = ("Original", qw1), ("Contrast", qw2)
                # Bug fix: the original loop reused `i` and `image`,
                # clobbering the frame counter and the current frame, and
                # called plt.imshow(hash(tuple(image))) on an int.
                for idx, (name, im) in enumerate(images):
                    ax = fig.add_subplot(1, 3, idx + 1)
                    ax.set_title(name)
                    plt.imshow(im)

                # Compare the rendered stick figure with the reference.
                s, m = compare_images(qw1, qw2, "Image1 vs Image2")
                sse = s
                mse = m

            # Bug fix: `k += 1` sat inside the `if k == 1:` block, so k
            # stayed 0 forever and the comparison branch never ran.
            k += 1
        frame_no += 1

    elapsed = time.time() - start_time
    # print("sse score : ", sse)
    # Bug fix: the original label said "Mean squared error" but the value
    # printed is elapsed wall-clock time divided by 100.
    print("elapsed time / 100 : ", elapsed / 100)

    cap.release()
    cv2.destroyAllWindows()
def test_net(visualise, cache_scoremaps, development):
    """Evaluate the multi-person pose model over the dataset.

    Runs the CNN part detector (or loads cached scoremaps), groups part
    detections into per-person poses via the multicut graph, optionally
    visualizes the result, and — when ``cfg.use_gt_segm`` is set — dumps
    COCO-format predictions to ``predictions_with_segm.json``.

    Args:
        visualise: show per-image visualizations when truthy.
        cache_scoremaps: write CNN scoremaps to ``cfg.scoremap_dir`` as
            .mat files instead of running the grouping stage.
        development: if truthy, process at most 10 images.
    """
    logging.basicConfig(level=logging.INFO)

    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(False)

    sm = SpatialModel(cfg)
    sm.load()

    draw_multi = PersonDraw()

    # Scoremaps may be precomputed on disk; only build the TF graph when
    # we actually have to run the CNN.
    from_cache = "cached_scoremaps" in cfg
    sess = None
    if not from_cache:
        sess, inputs, outputs = setup_pose_prediction(cfg)

    if cache_scoremaps:
        out_dir = cfg.scoremap_dir
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    pairwise_stats = dataset.pairwise_stats
    num_images = dataset.num_images if not development else min(10, dataset.num_images)
    coco_results = []

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images - 1))

        batch = dataset.next_batch()
        cache_name = "{}.mat".format(batch[Batch.data_item].coco_id)

        if not from_cache:
            outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
            scmap, locref, pairwise_diff = extract_cnn_output(
                outputs_np, cfg, pairwise_stats)

            if cache_scoremaps:
                if visualise:
                    img = np.squeeze(batch[Batch.inputs]).astype('uint8')
                    pose = argmax_pose_predict(scmap, locref, cfg.stride)
                    arrows = argmax_arrows_predict(scmap, locref,
                                                   pairwise_diff, cfg.stride)
                    visualize.show_arrows(cfg, img, pose, arrows)
                    visualize.waitforbuttonpress()
                    continue

                out_fn = os.path.join(out_dir, cache_name)
                # Renamed from `dict` — the original shadowed the builtin.
                mat_dict = {
                    'scoremaps': scmap.astype('float32'),
                    'locreg_pred': locref.astype('float32'),
                    'pairwise_diff': pairwise_diff.astype('float32')
                }
                scipy.io.savemat(out_fn, mdict=mat_dict)
                continue
        else:
            # cache_name = '1.mat'
            full_fn = os.path.join(cfg.cached_scoremaps, cache_name)
            mlab = scipy.io.loadmat(full_fn)
            scmap = mlab["scoremaps"]
            locref = mlab["locreg_pred"]
            pairwise_diff = mlab["pairwise_diff"]

        # Group part detections into per-person poses (multicut graph).
        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(
            sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array,
                                                     pos_array)

        if visualise:
            img = np.squeeze(batch[Batch.inputs]).astype('uint8')
            # visualize.show_heatmaps(cfg, img, scmap, pose)

            # visualize part detections after NMS (disabled):
            # visim_dets = visualize_detections(cfg, img, detections)
            # plt.imshow(visim_dets)
            # plt.show()
            # visualize.waitforbuttonpress()

            visim_multi = img.copy()
            draw_multi.draw(visim_multi, dataset, person_conf_multi)
            plt.imshow(visim_multi)
            plt.show()
            visualize.waitforbuttonpress()

        if cfg.use_gt_segm:
            coco_img_results = pose_predict_with_gt_segm(
                scmap, locref, cfg.stride, batch[Batch.data_item].gt_segm,
                batch[Batch.data_item].coco_id)
            coco_results += coco_img_results
            if len(coco_img_results):
                dataset.visualize_coco(coco_img_results,
                                       batch[Batch.data_item].visibilities)

    if cfg.use_gt_segm:
        with open('predictions_with_segm.json', 'w') as outfile:
            json.dump(coco_results, outfile)

    # Bug fix: `sess` exists only when the CNN was built (not from_cache);
    # the original called sess.close() unconditionally → NameError.
    if sess is not None:
        sess.close()