def preprocess_image(img_path, json_path=None):
    # Load with PIL and force a fixed 64x128 resolution before the
    # generic scale-and-crop logic below.
    img = Image.open(img_path)
    img = img.resize((64, 128))
    img = np.array(img)
    # Drop the alpha channel if present.
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
def preprocess_image(img, json_path=None):
    """
    Crops and rescales the image.
    This function was given; my own bounding-box crop code is separate.
    """
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
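# A minimal usage sketch for the preprocess_image variant above, assuming an
# HMR-style model object exposing predict(..., get_theta=True) as in the demo
# loop at the end of this listing; run_single_frame and its arguments are
# illustrative only, not part of the original code.
def run_single_frame(model, img, json_path=None):
    crop, proc_param, img_orig = preprocess_image(img, json_path)
    # The network expects a batch dimension: (1, img_size, img_size, 3).
    input_img = np.expand_dims(crop, 0)
    joints, verts, cams, joints3d, theta = model.predict(input_img, get_theta=True)
    return joints, verts, cams, joints3d, theta, proc_param, img_orig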
def detection(pipe_img, pipe_center, pipe_scale, pipe_img_2, pipe_kp):
    params = set_params()
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    detection_count = 0
    detection_time = time.time()
    while True:
        img = pipe_img.recv()
        datum = op.Datum()
        datum.cvInputData = img
        opWrapper.emplaceAndPop([datum])
        bodyKeypoints_img = datum.cvOutputData
        cv2.rectangle(bodyKeypoints_img, (330, 620), (630, 720), (0, 0, 255), 3)
        # cv2.imwrite('kps.jpg', bodyKeypoints_img)
        json_path = glob.glob('/media/ramdisk/output_op/*keypoints.json')
        scale, center = op_util.get_bbox(json_path[0])
        if scale == -1 and center == -1:
            continue
        if scale >= 10:
            continue
        pipe_img_2.send(img)
        pipe_center.send(center)
        pipe_scale.send(scale)
        pipe_kp.send(bodyKeypoints_img)
        os.system("rm /media/ramdisk/output_op/*keypoints.json")
        detection_count = detection_count + 1
        if detection_count == 100:
            print('Detection FPS:',
                  1.0 / ((time.time() - detection_time) / 100.0))
            detection_count = 0
            detection_time = time.time()
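# A sketch of how the detection() worker above might be wired up with
# multiprocessing pipes; start_detection_worker and its variable names are
# illustrative assumptions, only the detection() signature comes from the
# original code.
from multiprocessing import Process, Pipe

def start_detection_worker():
    # One duplex pipe per channel; the detection process gets one end,
    # the caller keeps the other.
    img_a, img_b = Pipe()
    center_a, center_b = Pipe()
    scale_a, scale_b = Pipe()
    img2_a, img2_b = Pipe()
    kp_a, kp_b = Pipe()
    worker = Process(target=detection,
                     args=(img_a, center_a, scale_a, img2_a, kp_a))
    worker.daemon = True
    worker.start()
    # Caller side: send camera frames with img_b.send(frame) and read the
    # detection results back from center_b / scale_b / img2_b / kp_b.
    return img_b, center_b, scale_b, img2_b, kp_b, worker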
def preprocess_image(img_path, target_size, json_path=None):
    crops = []
    params = []
    imgs = []
    for img_name in sorted(os.listdir(img_path)):
        if not img_name.endswith('.jpg'):
            continue
        img = io.imread(os.path.join(img_path, img_name))
        if img.shape[2] == 4:
            img = img[:, :, :3]
        if json_path is None:
            if np.max(img.shape[:2]) != target_size:
                print('Resizing so the max image size is %d..' % target_size)
                scale = (float(target_size) / np.max(img.shape[:2]))
            else:
                scale = 1.
            center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
            center = center[::-1]
        else:
            scale, center = op_util.get_bbox(os.path.join(json_path, img_name))
        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   target_size)
        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)
        crops.append(crop)
        params.append(proc_param)
        imgs.append(img)
    return crops, params, imgs
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)
    print("----- image shape convert -----")
    print(img.strides)
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    print(crop.strides)
    print(crop.size)
    print(crop.shape)
    print(dir(crop))
    return crop, proc_param, img
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)  # img is a NumPy array
    # print("img.shape:\n{0}\n\n".format(img.shape))  # original shape
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = openpose.get_bbox(json_path)
        print("using openpose keypoints json...")
        print("scale: ", scale)  # e.g. 0.12
        print("center: ", center)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    print("crop.size:", crop.size)
    # For my Dropbox/vr_mall_backup/IMPORTANT/front.jpg image this crop came
    # out badly distorted; possibly the OpenPose keypoints are in a different
    # order (HMR/Kanazawa use OpenPose 1.0 whereas I'm using 1.2).
    pltshow(crop)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    pltshow(crop)
    return crop, proc_param, img
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)
    # Promote grayscale images to 3-channel RGB.
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
def preprocess_image(img, depth, json_path=None, joints2d_gt=None, cam_gt=None):
    if img.shape[2] == 4:
        img = img[:, :, :3]
    # Stack the depth map as a fourth channel so it is scaled and cropped
    # together with the RGB image.
    depth = np.reshape(depth, [depth.shape[0], depth.shape[1], 1])
    img_orig = img
    img = np.concatenate([img, depth], -1)
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    if joints2d_gt is not None:
        crop, proc_param, joints2d_gt_scaled, cam_gt_scaled = img_util.scale_and_crop_with_gt(
            img, scale, center, config.img_size, joints2d_gt, cam_gt)
    else:
        joints2d_gt_scaled = None
        cam_gt_scaled = None
        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   config.img_size)
    # Split the crop back into image and depth channels and normalize each
    # to [-1, 1].
    crop_img = crop[:, :, 0:3]
    crop_depth = np.reshape(crop[:, :, 3], [crop.shape[0], crop.shape[1], 1])
    crop_img = 2 * ((crop_img / 255.) - 0.5)
    depth_max = np.max(crop_depth)
    crop_depth = 2.0 * (crop_depth / depth_max - 0.5)
    return crop_img, crop_depth, proc_param, img_orig, joints2d_gt_scaled, cam_gt_scaled
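# Quick illustration (not from the original code) of the depth normalization
# used above: each crop is divided by its own maximum, so [0, depth_max] maps
# to [-1, 1] independently per frame.
_example_depth = np.array([[0.0, 1.5], [3.0, 6.0]])
_example_norm = 2.0 * (_example_depth / np.max(_example_depth) - 0.5)
# _example_norm == [[-1., -0.5], [0., 1.]]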
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)
    if json_path is None:
        scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
def detection(pipe_img, pipe_center, pipe_scale, pipe_shape, pipe_img_2, pipe_kp):
    params = set_params()
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    detection_count = 0
    detection_time = time.time()
    while True:
        img = pipe_img.recv()
        datum = op.Datum()
        datum.cvInputData = img
        opWrapper.emplaceAndPop([datum])
        bodyKeypoints_img = datum.cvOutputData
        # cv2.rectangle(bodyKeypoints_img, (330, 50), (630, 720), (0, 0, 255), 1)
        # cv2.rectangle(bodyKeypoints_img, (330, 630), (630, 720), (0, 0, 255), 3)
        cv2.imwrite('/media/ramdisk/kps.jpg', bodyKeypoints_img)
        str_img_kps = base64.b64encode(
            open('/media/ramdisk/kps.jpg', 'rb').read())
        message_id = queue1.sendMessage(delay=0).message(
            str_img_kps.decode('utf-8')).execute()
        msg1.append(message_id)
        if len(msg1) > 1:
            rt = queue1.deleteMessage(id=msg1[0]).execute()
            del msg1[0]
        json_path = glob.glob('/media/ramdisk/output_op/*keypoints.json')
        scale, center, person_shape = op_util.get_bbox(json_path[0])
        if scale == -1 and center == -1 and person_shape == -1:
            continue
        if scale >= 10:
            continue
        pipe_img_2.send(img)
        pipe_center.send(center)
        pipe_scale.send(scale)
        pipe_shape.send(person_shape)
        pipe_kp.send(bodyKeypoints_img)
        os.system("rm /media/ramdisk/output_op/*keypoints.json")
        detection_count = detection_count + 1
        if detection_count == 100:
            print('Detection FPS:',
                  1.0 / ((time.time() - detection_time) / 100.0))
            detection_count = 0
            detection_time = time.time()
def preprocess_image_nathan(img, json_path=None):
    print("img.shape:\n{0}\n\n".format(img.shape))
    if img.shape[2] == 4:
        img = img[:, :, :3]
    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)  # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = openpose.get_bbox(json_path)
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    # Unclear why Kanazawa's original code also returns 'img' here; it may
    # just be left over from older code.
    return crop, proc_param, img
json_file.write(json.dumps(data))
print('camera pose written!')

while True:
    t0 = time.time()
    try:
        img = io.imread(config.img_path)
        if img.shape[2] == 4:
            img = img[:, :, :3]
    except IOError:
        print("image not found, try again!")
        continue
    else:
        print("image load success!")
    scale, center = op_util.get_bbox(config.json_path)
    if scale == -1 and center == -1:
        continue
    if scale >= 10:
        continue
    # print(111, scale, center, config.img_size)
    input_img, proc_param = img_util.scale_and_crop(img, scale, center,
                                                    config.img_size)
    input_img = 2 * ((input_img / 255.) - 0.5)
    input_img = np.expand_dims(input_img, 0)
    joints, verts, cams, joints3d, theta = model.predict(input_img, get_theta=True)
    # print('3D Rec:', time.time() - t0)
    cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
        proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
    # print('3D Rec:', time.time() - t0)
    # print('type(cam_for_render):', type(cam_for_render))
    # print(img.shape[:2])