Example No. 1
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)  # img is a NumPy array
    #print("img.shape:\n{0}\n\n".format(img.shape)) # original shape
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = openpose.get_bbox(json_path)
        print("using openpose keypoints  json...")
        print("scale: ", scale)  # 0.12
        print("center: ", center)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    print("crop.size:", crop.size)
    pltshow(
        crop
    )  # for my Dropbox/vr_mall_backup/IMPORTANT/front.jpg image, this crop did something real weird to it.  Might be because the openpose keypoints are in a different order??   (HMR & Kanazawa are using 1.0 whereas I'm using 1.2)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    pltshow(crop)

    return crop, proc_param, img
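The [-1, 1] normalization used here (and in the examples below) is easy to invert for display. A minimal sketch, assuming NumPy; the helper name denormalize_crop is mine, not from the original code:

import numpy as np

def denormalize_crop(crop):
    # Invert crop = 2 * ((crop / 255.) - 0.5): map [-1, 1] back to [0, 255].
    return np.clip((crop / 2. + 0.5) * 255., 0, 255).astype(np.uint8)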
Example No. 2
def rec_human(pipe_img_2, pipe_center, pipe_scale):
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    print(config.smpl_face_path)
    rec_human_count = 0
    rec_human_time = time.time()
    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        input_img, proc_param = img_util.scale_and_crop(
            img, scale, center, config.img_size)
        input_img = 2 * ((input_img / 255.) - 0.5)
        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
            proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
        #print(cam_for_render.shape)
        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()
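The FPS reporting at the bottom of this loop can be factored into a small helper. This is a sketch of the same bookkeeping; the FPSCounter class name is my own, not part of the original code:

import time

class FPSCounter:
    # Report the average frame rate once every `interval` frames, then reset.
    def __init__(self, interval=100):
        self.interval = interval
        self.count = 0
        self.start = time.time()

    def tick(self):
        self.count += 1
        if self.count == self.interval:
            print('rec FPS:', self.interval / (time.time() - self.start))
            self.count = 0
            self.start = time.time()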
Example No. 3
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 4
File: demo.py Project: ats05/hmr
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)
    print("----- image shape convert -----")
    print(img.strides)

    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    print(crop.strides)
    print(crop.size)
    print(crop.shape)
    print(dir(crop))

    return crop, proc_param, img
Example No. 5
    def preprocess_image(self, img):
        # img = io.imread(img_path)
        if img.shape[2] == 4:
            img = img[:, :, :3]

        # if json_path is None:
        if np.max(img.shape[:2]) != self.config.img_size:
            print('Resizing so the max image size is %d..' %
                  self.config.img_size)
            scale = (float(self.config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
        # else:
        #     scale, center = op_util.get_bbox(json_path)

        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   self.config.img_size)

        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)

        return crop, proc_param, img
Example No. 6
def preprocess_image(img_path, person_bbox=None):
    img = io.imread(img_path)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if person_bbox is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        x1, y1, x2, y2 = person_bbox
        center = np.array([(x1 + x2) // 2, (y1 + y2) // 2])
        person_height = np.linalg.norm(y2 - y1)
        scale = 150. / person_height

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
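The person_bbox branch above maps a bounding box to the scale/center pair expected by img_util.scale_and_crop. The same computation as a standalone sketch (the helper name and the target_height parameter are mine):

import numpy as np

def bbox_to_scale_center(x1, y1, x2, y2, target_height=150.):
    # Scale so the bbox height maps to roughly target_height pixels in the
    # crop, and use the bbox midpoint as the crop center in (x, y).
    center = np.array([(x1 + x2) // 2, (y1 + y2) // 2])
    scale = target_height / np.linalg.norm(y2 - y1)
    return scale, center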
Example No. 7
def preprocess_image(img, bbox=None):
    
    # remove alpha channel
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if bbox is None:
        if np.max(img.shape[:2]) != config.img_size:
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2])/2).astype(int)
        center = center[::-1]

    else:
        t, l, b, r = [float(coord) for coord in bbox]
        min_pt = np.array([l, t])
        max_pt = np.array([r, b])
        person_height = np.linalg.norm(max_pt - min_pt)
        scale = 150. / person_height
        center = (min_pt + max_pt) / 2.
        print("scale, center")
        print(scale, center)

    crop, proc_param = image.scale_and_crop(img, scale, center,
                                               config.img_size)

    abnormal = crop
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return abnormal, crop, proc_param, img
Example No. 8
def preprocess_image(img, json_path=None):
    """
    Crops and rescales image - this function was given (my own bb crop code is separate)
    """
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 9
def preprocess_image(img_path, target_size, json_path=None):
    crops = []
    params = []
    imgs = []
    for img_name in sorted(os.listdir(img_path)):
        if not img_name.endswith('.jpg'):
            continue
        img = io.imread(os.path.join(img_path, img_name))
        if img.shape[2] == 4:
            img = img[:, :, :3]

        if json_path is None:
            if np.max(img.shape[:2]) != target_size:
                print('Resizing so the max image size is %d..' % target_size)
                scale = (float(target_size) / np.max(img.shape[:2]))
            else:
                scale = 1.
            center = np.round(np.array(img.shape[:2]) / 2).astype(int)
            # image center in (x,y)
            center = center[::-1]
        else:
            scale, center = op_util.get_bbox(os.path.join(json_path, img_name))

        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   target_size)

        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)
        crops.append(crop)
        params.append(proc_param)
        imgs.append(img)
    return crops, params, imgs
Example No. 10
def preprocess_image(img_path, shift):
    img_in = io.imread(img_path)
    img_shifted = wrist_acc.img_shift_padding(img_in, shift)
    crop = []
    proc_param = []
    disp_img = []
    for img in img_shifted:
        if img.shape[2] == 4:
            img = img[:, :, :3]

        if np.max(img.shape[:2]) != config.img_size:
            # print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]

        _crop, _proc_param, _disp_img = img_util.scale_and_crop(img, scale, center,
                                                config.img_size)

        # Normalize image to [-1, 1]
        _crop = 2 * ((_crop / 255.) - 0.5)
        crop.append(_crop)
        proc_param.append(_proc_param)
        disp_img.append(_disp_img)

    return crop, proc_param, disp_img
Example No. 11
def preprocess_image(img_path, json_path=None):
    #img = io.imread(img_path)
    #img = Image.fromarray(img)
    img = Image.open(img_path)
    img = img.resize((64,128))
    img = np.array(img)
    #img = resize(img, (128 , 64))
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 12
def preprocess_image(img_path, kps):
    img = io.imread(img_path)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    scale, center = op_util.get_bbox_dict(kps)

    crop, proc_param = img_util.scale_and_crop(img, scale, center, 224)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 13
def preprocess_image(img,
                     depth,
                     json_path=None,
                     joints2d_gt=None,
                     cam_gt=None):

    #img = io.imread(img_path)
    #if img.shape[2] == 4:
    #    img = img[:, :, :3]
    #if depth_path is not None:
    #    if ".pfm" in depth_path:
    #        dep = pfm.load_pfm(depth_path)
    #    else:
    #        dep = io.imread(depth_path)
    #else:
    #    dep = np.zeros(img.size, dtype = np.float32)

    if img.shape[2] == 4:
        img = img[:, :, :3]
    depth = np.reshape(depth, [depth.shape[0], depth.shape[1], 1])
    img_orig = img
    img = np.concatenate([img, depth], -1)

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            #print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)
    if joints2d_gt is not None:
        crop, proc_param, joints2d_gt_scaled, cam_gt_scaled = img_util.scale_and_crop_with_gt(
            img, scale, center, config.img_size, joints2d_gt, cam_gt)

    else:
        joints2d_gt_scaled = None
        cam_gt_scaled = None
        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   config.img_size)

    # Normalize image to [-1, 1]
    crop_img = crop[:, :, 0:3]
    crop_depth = np.reshape(crop[:, :, 3], [crop.shape[0], crop.shape[1], 1])
    crop_img = 2 * ((crop_img / 255.) - 0.5)
    depth_max = np.max(crop_depth)
    crop_depth = 2.0 * (crop_depth / depth_max - 0.5)
    return crop_img, crop_depth, proc_param, img_orig, joints2d_gt_scaled, cam_gt_scaled
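This RGB-D variant normalizes the color and depth channels separately. A minimal sketch of that step in isolation, assuming the depth channel has a positive maximum (as the code above implicitly does):

import numpy as np

def normalize_rgbd_crop(crop):
    # crop: H x W x 4 array (RGB + depth). RGB is mapped to [-1, 1] via 255,
    # depth is mapped to [-1, 1] via its own maximum, mirroring the code above.
    crop_img = 2 * ((crop[:, :, :3] / 255.) - 0.5)
    crop_depth = crop[:, :, 3:4]
    crop_depth = 2. * (crop_depth / np.max(crop_depth) - 0.5)
    return crop_img, crop_depth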
Example No. 14
    def locate_person_and_crop(self, img_path):
        kps = get_people(img_path)
        img = io.imread(img_path)
        if img.shape[2] == 4:
            img = img[:, :, :3]

        scale, center = op_util.get_bbox_dict(kps)
        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   224)
        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)
        # Add batch dimension: 1 x D x D x 3
        crop = np.expand_dims(crop, 0)

        return crop, proc_param, img
Example No. 15
def preprocess_image(img, pose_keypoints, use_pose=True):

    if use_pose:
        scale, center = get_bbox(pose_keypoints)

    if (not use_pose) or (scale is None):
        scale = (float(config.img_size) / np.max(img.shape[:2]))
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        center = center[::-1]

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param
Example No. 16
def rec_human(pipe_img_2, pipe_center, pipe_scale, pipe_shape, pipe_kp):
    global last_person
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    rec_human_count = 0
    rec_human_time = time.time()
    #num_render = 1

    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        person_shape = pipe_shape.recv()
        kp = pipe_kp.recv()

        input_img, proc_param, last_person = img_util.scale_and_crop(
            img, scale, center, person_shape, 0.25, config.img_size,
            last_person)

        cv2.imwrite('/media/ramdisk/input.jpg', input_img)
        print(np.mean(input_img))

        input_img = ((input_img / 255.))

        # input_img = 2 * ((input_img / 255.) - 0.5)

        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        #cam_for_render, vert_shifted, joints_orig = vis_util.get_original(proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
        write_obj(smpl_model_used, theta, outmesh_path)
        str_1 = open(outmesh_path, 'rb').read()
        message_id = queue2.sendMessage(delay=0).message(str_1).execute()
        msg2.append(message_id)
        if len(msg2) > 1:
            rt = queue2.deleteMessage(id=msg2[0]).execute()
            del msg2[0]

        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()
Example No. 17
def preprocess_image(img):

    if np.max(img.shape[:2]) != config.img_size:
        # print('Resizing so the max image size is %d..' % img_size)
        scale = (float(config.img_size) / np.max(img.shape[:2]))
    else:
        scale = 1.
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # image center in (x,y)
    center = center[::-1]

    crop, proc_param = img_util.scale_and_crop(img, scale, center, config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 18
def preprocess_image(img_path, img_size=224):
    img = io.imread(img_path)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if np.max(img.shape[:2]) != img_size:
        print('Resizing so the max image size is %d..' % img_size)
        scale = (float(img_size) / np.max(img.shape[:2]))
    else:
        scale = 1.
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # image center in (x,y)
    center = center[::-1]

    crop, proc_param = img_util.scale_and_crop(img, scale, center, img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
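Most of the examples on this page share the same fallback when no keypoints or bbox are given: scale so the longest side equals img_size and center the crop on the image middle. That logic as a standalone sketch (the function name is mine):

import numpy as np

def default_scale_and_center(img, img_size):
    # Scale so the longest image side equals img_size, and take the image
    # center flipped from (row, col) to (x, y) order.
    if np.max(img.shape[:2]) != img_size:
        scale = float(img_size) / np.max(img.shape[:2])
    else:
        scale = 1.
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)[::-1]
    return scale, center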
Example No. 19
def preprocess_image(img_path, json_path=None):
    img = io.imread(img_path)

    if json_path is None:
        scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = op_util.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img
Example No. 20
def preprocess_image(img_path, json_path, n):
    img = io.imread(img_path)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    scales, centers = op_util.get_multiple_bbox(json_path, n)

    crops = list()
    proc_params = list()

    for i in range(n):
        crop, proc_param = img_util.scale_and_crop(img, scales[i], centers[i],
                                                   config.img_size)

        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)

        crops.append(crop)
        proc_params.append(proc_param)

    return crops, proc_params, img
Example No. 21
def crop_person(img):
    kps = multipose_get_people(img)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    scale, center, min_pt, max_pt = op_util.get_bbox_info(kps)
    crop, proc_param = img_util.scale_and_crop(img, scale, center, 224)
    info = _2DPoseInfo(scale=proc_param['scale'],
                       crop_image_size=proc_param['img_size'],
                       crop_start_pt=proc_param['start_pt'],
                       crop_end_pt=proc_param['end_pt'],
                       bbox_start_pt=max_pt,
                       bbox_end_pt=min_pt,
                       original_image_size=img.shape)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)
    # Add batch dimension: 1 x D x D x 3
    crop = np.expand_dims(crop, 0)

    return crop, info
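_2DPoseInfo is not defined in this snippet; a plausible minimal container consistent with the fields used above would be a namedtuple. This reconstruction is an assumption, not the original definition:

from collections import namedtuple

# Hypothetical stand-in for the container used above; the real definition
# is not shown in this example.
_2DPoseInfo = namedtuple('_2DPoseInfo', [
    'scale', 'crop_image_size', 'crop_start_pt', 'crop_end_pt',
    'bbox_start_pt', 'bbox_end_pt', 'original_image_size'
])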
Example No. 22
def preprocess_multiple(img_path, json_path=None):
    img = io.imread(img_path)
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        raise Exception("No JSON.")

    else:
        scales_centers = op_util.get_bbox_all(json_path)

    crops_params_imgs = []

    for scale, center in scales_centers:
        crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                                   config.img_size)

        # Normalize image to [-1, 1]
        crop = 2 * ((crop / 255.) - 0.5)

        crops_params_imgs.append((crop, proc_param, img))

    return crops_params_imgs
Example No. 23
def preprocess_image_nathan(img, json_path=None):
    print("img.shape:\n{0}\n\n".format(img.shape))
    if img.shape[2] == 4:
        img = img[:, :, :3]

    if json_path is None:
        if np.max(img.shape[:2]) != config.img_size:
            print('Resizing so the max image size is %d..' % config.img_size)
            scale = (float(config.img_size) / np.max(img.shape[:2]))
        else:
            scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x,y)
        center = center[::-1]
    else:
        scale, center = openpose.get_bbox(json_path)

    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)

    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img  # unclear what the original code used this returned 'img' for; possibly left over from older code
Example No. 24
while True:
    t0 = time.time()
    try:
        img = io.imread(config.img_path)
        if img.shape[2] == 4:
            img = img[:, :, :3]
    except IOError:
        print("image not found, try again!")
        continue
    else:
        print("image load success!")
    scale, center = op_util.get_bbox(config.json_path)
    if scale == -1 and center == -1: continue
    if scale >= 10: continue
    #print(111, scale, center, config.img_size)
    input_img, proc_param = img_util.scale_and_crop(img, scale, center,
                                                    config.img_size)
    input_img = 2 * ((input_img / 255.) - 0.5)
    input_img = np.expand_dims(input_img, 0)
    joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                         get_theta=True)
    #print('3D Rec:', time.time() - t0)
    cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
        proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])
    #print('3D Rec:', time.time() - t0)
    #print('type(cam_for_render):', type(cam_for_render))
    #print(img.shape[:2])

    #write_obj(smpl_model_used,theta,outmesh_path)
    #str_camera_pose = ''.join(cam_for_render)
    #data["flenth"] = cam_for_render[0]
    #data["px"] = cam_for_render[1]
Example No. 25
def rec_human(pipe_img_2, pipe_center, pipe_scale, pipe_kp):
    config = flags.FLAGS
    config(sys.argv)
    config.load_path = src.config.PRETRAINED_MODEL
    config.batch_size = 1
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    rec_human_count = 0
    rec_human_time = time.time()
    num_render = 1

    while True:

        img = pipe_img_2.recv()
        center = pipe_center.recv()
        scale = pipe_scale.recv()
        kp = pipe_kp.recv()
        input_img, proc_param = img_util.scale_and_crop(
            img, scale, center, config.img_size)
        input_img = 2 * ((input_img / 255.) - 0.5)
        input_img = np.expand_dims(input_img, 0)
        joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                             get_theta=True)
        cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
            proc_param, verts[0], cams[0], joints[0], img_size=img.shape[:2])

        print(111111)
        # Cycle the saved render data through the four ramdisk slots
        # (1 -> 2 -> 3 -> 4 -> 1), one slot per frame.
        if num_render == 1:
            np.save('/media/ramdisk/render_data/1/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/1/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/1/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/1/kp.jpg',kp)
            #print(kp.shape)
            num_render = 2
        elif num_render == 2:
            np.save('/media/ramdisk/render_data/2/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/2/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/2/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/2/kp.jpg',kp)
            num_render = 3
        elif num_render == 3:
            np.save('/media/ramdisk/render_data/3/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/3/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/3/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/3/kp.jpg',kp)
            num_render = 4
        elif num_render == 4:
            np.save('/media/ramdisk/render_data/4/cam_for_render.npy',
                    cam_for_render)
            np.save('/media/ramdisk/render_data/4/vert_shifted.npy',
                    vert_shifted)
            np.save('/media/ramdisk/render_data/4/kp.npy', kp)
            #cv2.imwrite('/media/ramdisk/render_data/4/kp.jpg',kp)
            num_render = 1

        rec_human_count = rec_human_count + 1
        if rec_human_count == 100:
            print('rec FPS:', 1.0 / ((time.time() - rec_human_time) / 100.0))
            rec_human_count = 0
            rec_human_time = time.time()