Example #1
def produceInput(NN):
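    # Assumes numpy is imported as np and the project's utility module as ut;
    # NN is a config dict providing 'offset', 'inputSize' and 'sigma_center'.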
    json_file = ut.getCaffeCpm() + '/jsonDatasets/H36M_annotations_testSet.json'
    (data, num_elem) = ut.loadJsonFile(json_file)
    curr_data = data[0]
    
    img = ut.cv2.imread(curr_data['img_paths'])
    joints = np.array(curr_data['joint_self'])
    
    center = ut.getCenterJoint(joints)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box_points = ut.getBoundingBox(joints, center, NN['offset'], img_width, img_height)
    
    # manipulate image and joints for caffe
    (img_croppad, joints) = ut.cropImage(img, box_points, joints)
    (resizedImage, joints) = ut.resizeImage(img_croppad, NN['inputSize'], joints)
    resizedImage = np.divide(resizedImage, float(256))  # scale pixels to [0, 1)
    resizedImage = np.subtract(resizedImage, 0.5)       # centre around zero
    
    # generate labels and center channel
    input_size = NN['inputSize']
    center_channel = ut.generateGaussian(NN['sigma_center'], input_size, [input_size/2, input_size/2])
    
    fno = int(curr_data['annolist_index'])
    camera = curr_data['camera']
    action = curr_data['action']
    person = curr_data['person']
    metadata_ch = ut.generateMaskChannel(NN['inputSize'], fno, camera, action, person)
    imgch = np.concatenate((resizedImage, center_channel[:,:,np.newaxis], metadata_ch), axis=2)
    
    imgch = np.transpose(imgch, (2, 0, 1))  # HWC -> CHW, as Caffe expects
    return imgch
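For context, a minimal, hypothetical driver for produceInput is sketched below. The NN values, file paths and output blob name are assumptions for illustration (the blob name is taken from the call in Example #4); caffe.Net, blob reshaping and net.forward() are standard pycaffe calls.

# Hypothetical driver for produceInput; paths and NN values are placeholders.
import numpy as np
import caffe

NN = {'inputSize': 368, 'offset': 25, 'sigma_center': 21}  # illustrative values
net = caffe.Net('pose_deploy.prototxt', 'pose.caffemodel', caffe.TEST)

imgch = produceInput(NN)                    # (channels, height, width)
net.blobs['data'].reshape(1, *imgch.shape)  # batch of one
net.blobs['data'].data[0] = imgch
net.forward()
heatmaps = net.blobs['Mconv5_stage6_new'].data[0]  # assumed output blob name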
Example #2
File: demo_img.py Project: DenisTome/caffe
def preprocessImage(NN, curr_data, batch_size, num_channels):
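    # Single-sample variant of the batch routine in Example #3;
    # batch_size is accepted but unused here.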
    info = np.empty(6, dtype=int)  # original (height, width) + bounding box
        
    img = ut.cv2.imread(curr_data['img_paths'])
    joints = np.array(curr_data['joint_self'])
    
    center = ut.getCenterJoint(joints)
    img_width = img.shape[1]
    img_height = img.shape[0]
    info[:2] = img.shape[:2]
    box_points = ut.getBoundingBox(joints, center, NN['offset'], img_width, img_height)
    info[2:] = box_points
    
    # manipulate image and joints for caffe
    (img_croppad, joints) = ut.cropImage(img, box_points, joints)
    (resImage, joints) = ut.resizeImage(img_croppad, NN['inputSize'], joints)
    resizedImage = np.divide(resImage, float(256))
    resizedImage = np.subtract(resizedImage, 0.5)
    
    # generate labels and center channel
    input_size = NN['inputSize']
    center_channel = ut.generateGaussian(NN['sigma_center'], input_size, [input_size/2, input_size/2])
    
    if (num_channels == 4):
        imgch = np.concatenate((resizedImage, center_channel[:,:,np.newaxis]), axis=2)
    else:
        fno = int(curr_data['annolist_index'])
        camera = curr_data['camera']
        action = curr_data['action']
        person = curr_data['person']
        metadata_ch = ut.generateMaskChannel(NN['inputSize'], fno, camera, action, person)
        imgch = np.concatenate((resizedImage, center_channel[:,:,np.newaxis], metadata_ch), axis=2)
    
    imgch = np.transpose(imgch, (2, 0, 1))
    return (imgch, info, resImage)
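A short usage sketch, assuming annotation records loaded as in Example #1; the meaning of info follows from how the function fills it above.

# Hypothetical call (annotation loading as in Example #1):
num_channels = 5   # 4 keeps RGB + center map; any other value adds metadata
(imgch, info, res_img) = preprocessImage(NN, data[0], 1, num_channels)
orig_h, orig_w = info[0], info[1]  # original image size
box_points = info[2:]              # bounding box used for the crop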
Example #3
def preprocessImages(NN, data, batch_size, batch_imgch, num_channels, masks):
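    # batch_imgch is a caller-allocated array of shape
    # (batch_size, num_channels, inputSize, inputSize); see the sketch
    # after this example.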
    batch_info = np.empty((batch_size, 6), dtype=int)
    for b in range(batch_size):
        if (batch_size > 1):
            curr_data = data[b]
        else:
            curr_data = data

        img = ut.cv2.imread(curr_data['img_paths'])
        joints = np.array(curr_data['joint_self'])
        joints = ut.removeZ(joints)

        center = ut.getCenterJoint(joints)
        img_width = img.shape[1]
        img_height = img.shape[0]
        batch_info[b, :2] = img.shape[:2]
        box_points = ut.getBoundingBox(joints, center, NN['offset'], img_width,
                                       img_height)
        batch_info[b, 2:] = box_points

        # manipulate image and joints for caffe
        (img_croppad, joints) = ut.cropImage(img, box_points, joints)
        (resizedImage, joints) = ut.resizeImage(img_croppad, NN['inputSize'],
                                                joints)
        resizedImage = np.divide(resizedImage, float(256))
        resizedImage = np.subtract(resizedImage, 0.5)

        # generate labels and center channel
        input_size = NN['inputSize']
        center_channel = ut.generateGaussian(NN['sigma_center'], input_size,
                                             [input_size / 2, input_size / 2])

        if (num_channels == 4):
            imgch = np.concatenate(
                (resizedImage, center_channel[:, :, np.newaxis]), axis=2)
        else:
            fno = int(curr_data['annolist_index'])  # 1-based frame index
            camera = masks['mask_camera'][0, fno - 1]
            action = masks['mask_action'][0, fno - 1]
            person = masks['mask_person'][0, fno - 1]
            metadata_ch = ut.generateMaskChannel(NN['inputSize'],
                                                 curr_data['annolist_index'],
                                                 camera, action, person)
            imgch = np.concatenate(
                (resizedImage, center_channel[:, :, np.newaxis], metadata_ch),
                axis=2)

        imgch = np.transpose(imgch, (2, 0, 1))
        batch_imgch[b] = imgch
    return (batch_imgch, batch_info)
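The batch variant expects the caller to allocate batch_imgch up front. Below is a minimal allocation-and-call sketch; the shapes are inferred from the per-sample transpose above, the values are illustrative, and masks must provide the 'mask_camera', 'mask_action' and 'mask_person' arrays indexed in the code.

# Hypothetical batch call (shapes inferred from the code above):
batch_size = 4
num_channels = 5   # RGB + center map + metadata channel
batch_imgch = np.empty((batch_size, num_channels,
                        NN['inputSize'], NN['inputSize']))
(batch_imgch, batch_info) = preprocessImages(NN, data[:batch_size], batch_size,
                                             batch_imgch, num_channels, masks)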
Example #4
File: demo_img.py Project: DenisTome/caffe
def executeOnFrame(NN, net, data, output_dir, proj=False, astr=False,
                   show=False, stage_3d_err=False, hm_joint=False):
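    # Full demo pipeline on one annotated frame: 2D prediction, heat-map
    # export, 3D lifting and (optionally) 2D re-projection. astr is an
    # optional suffix appended to the saved file names.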
    num_channels = ut.getNumChannelsLayer(net, 'data')
    joints = np.array(data['joint_self'])
    
    # Get Image
    img_orig = ut.cv2.imread(data['img_paths'])
    box_points = ut.getBoundingBox(joints, ut.getCenterJoint(joints),
                                   75, img_orig.shape[1], img_orig.shape[0])
    img = ut.cropImage(img_orig, box_points)
    save_name = output_dir + 'image_RGB.png'
    if astr:
        save_name = output_dir + 'image_RGB' + astr + '.png'
    img_saved = ut.convertImgCv2(img)    
    ut.plt.imsave(save_name, img_saved)
    
    # Extract 2D predictions
    (imgch, info, resizedImage) = preprocessImage(NN, data, 1, num_channels)
    ut.netForward(net, imgch)
    img = ut.convertImgCv2(img)
    layer_name = 'Mconv5_stage6_new'
    out = ut.getOutputLayer(net, layer_name)
    (pred, heatMaps, err) = postprocessHeatmaps(NN, out, data, info)
    img_2d_skel = ut.plotImageJoints(img_orig, pred, h=(not show))
    img_2d_skel = ut.cropImage(img_2d_skel, box_points)
    save_name = output_dir + 'image_2d.png'
    if astr:
        save_name = output_dir + 'image_2d' + astr + '.png'
    ut.plt.imsave(save_name, img_2d_skel)
    
    # Plot data
    heatMap = ut.cropImage(heatMaps[:,:,-1], box_points)
    save_name = output_dir + 'image_hm.png'
    if astr:
        save_name = output_dir + 'image_hm' + astr + '.png'
    ut.plt.imsave(save_name, heatMap)
    
    # Lift the 2D pose to 3D: jointly estimate the shape coefficients a and
    # the camera rotation r, then refine them alternately for 10 iterations
    Lambda = 0.05
    (default_r, e, z, weights) = load_parameters()
    (w, s, mean) = normalise_data(pred.flatten())
    w = w[np.newaxis,:]
    camera = int(data['camera']) - 1
    (a, r) = uc.estimate_a_and_r(w, e, z, default_r[camera], Lambda*weights)
    for j in xrange(10):
        r = uc.reestimate_r(w, z, a, e, default_r[camera], r)
        (a, res) = uc.reestimate_a(w, e, z, r, default_r[camera], Lambda*weights)
    mod = uc.build_model(a, e, z).squeeze()
    save_name = output_dir + 'image_3d.pdf'
    if astr:
        save_name = output_dir + 'image_3d' + astr + '.pdf'
    ut.plot3DJoints(-mod, save_pdf=save_name)
    #ut.plot3DJoints(-mod, pbaspect=[1,0.88,1])
    
    if stage_3d_err:
        resizedImage = ut.convertImgCv2(resizedImage)/255.0
                                        
        layers = ['conv7_stage1_new', 'Mconv5_stage2_new',
                  'Mconv5_stage3_new', 'Mconv5_stage4_new',
                  'Mconv5_stage5_new', 'Mconv5_stage6_new']
        gt3d = ut.filterJoints(data['joint_self_3d'])
        for l in range(len(layers)):
            layer_name = layers[l]
            out = ut.getOutputLayer(net, layer_name)
            (pred, _, _) = postprocessHeatmaps(NN, out, data, info)
            if hm_joint:
                # save heat-map
                heatMap = ut.cv2.resize(out[hm_joint], (NN['inputSize'],NN['inputSize']),
                                        interpolation = ut.cv2.INTER_LANCZOS4)
                save_name = 'hm_joint_%d_stage_%d.png' % (hm_joint, l)
                ut.plt.imsave(output_dir+save_name, heatMap)
                # generate heat-maps
                channels = out.transpose(1,2,0)
                hm = ut.cv2.resize(channels, (resizedImage.shape[0],resizedImage.shape[0]),
                                   interpolation = ut.cv2.INTER_LANCZOS4)
                hm = hm[:,:,hm_joint]
                ut.plt.imshow(resizedImage)
                ut.plt.hold(True)
                ut.plt.imshow(hm, alpha=0.6)  # color='Blues'
                save_name = 'hm_joint_%d_stage_%d.pdf' % (hm_joint, l)
                ut.plt.axis('off')
                ut.plt.savefig(output_dir+save_name)
                ut.plt.close()
                # generate skeletons
                save_name = 'img_skel_stage_%d.png' % (l)
                img_2d_skel = ut.plotImageJoints(img_orig, pred, h=False)
                # tighten the crop for the saved visualisation (fixed offsets)
                new_box_points = box_points.copy()
                new_box_points[0] += 120
                new_box_points[2] -= 240
                new_box_points[1] += 70
                new_box_points[3] -= 70
                img_2d_skel = ut.cropImage(img_2d_skel, new_box_points)
                ut.plt.imsave(output_dir+save_name, img_2d_skel)
            (w, s, mean), (w3d, s3d) = normalise_data(pred.flatten(), w3d=gt3d)
            w = w[np.newaxis,:]
            (a, r) = uc.estimate_a_and_r(w, e, z, default_r[camera], Lambda*weights)
            for j in xrange(10):
                r = uc.reestimate_r(w, z, a, e, default_r[camera], r)
                (a, res) = uc.reestimate_a(w, e, z, r, default_r[camera], Lambda*weights)
            m = uc.build_and_rot_model(a,e,z,r).squeeze()
            err = cost3d(m, w3d, s3d)
            print '3D error stage %r: %.2f' % (l, err/17.0)
            save_name = '%sstage_%d_3d.pdf' % (output_dir, l)
            title = 'Stage %r\nErr: %.2f mm' % (l+1, err/17.0)
            ut.plot3DJoints(-m, save_pdf=save_name, title=title)
    
    if proj:
        points = project2D(r, z, a, e, default_r[camera], s).squeeze()
        points += mean[:,np.newaxis]
        ut.plt.figure()
        img_2d_skel = ut.plotImageJoints(img_orig, points.T, h=False)
        img_2d_skel = ut.cropImage(img_2d_skel, box_points)
        save_name = output_dir + 'image_2d_proj.png'
        if astr:
            save_name = output_dir + 'image_2d_proj' + astr + '.png'
        ut.plt.imsave(save_name, img_2d_skel)
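Finally, a hypothetical end-to-end invocation, reusing the loading helpers from Example #1. The network paths are placeholders, and note that these snippets target Python 2 (xrange and print statements above).

# Hypothetical end-to-end call (network paths are placeholders):
import caffe
net = caffe.Net('pose_deploy.prototxt', 'pose.caffemodel', caffe.TEST)
json_file = ut.getCaffeCpm() + '/jsonDatasets/H36M_annotations_testSet.json'
(data, num_elem) = ut.loadJsonFile(json_file)
executeOnFrame(NN, net, data[0], 'output/', proj=True, astr='_frame0')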