Example #1
def makeObj(path=''):
    ## Load SMPL model (here we load the male model)
    ## Make sure the model file path is correct
    m = load_model('../models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    if path != '':
        m3 = pickle.load(open(path, 'rb'), encoding='iso-8859-1')
        m.pose[:] = m3['pose']
        m.betas[:] = m3['betas']
    else:
        # Assign random pose and shape parameters
        m.pose[:] = np.random.rand(m.pose.size) * .2
        m.betas[:] = np.random.rand(m.betas.size) * .03

    ## Write to an .obj file
    outmesh_path = './hello_smpl.obj'
    with open(outmesh_path, 'w') as fp:
        for v in m.r:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))

        for f in m.f + 1:  # Faces are 1-based, not 0-based in obj files
            fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))

    ## Print message
    print('..Output mesh saved to: ', outmesh_path)
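The function above relies on names imported elsewhere in the original script. A minimal sketch of the assumed surroundings and a call, using the SMPL loader import that appears in the later examples (the smpl_webuser path and the parameter pickle name are placeholders):

import sys
sys.path.insert(0, '/path/to/smpl/smpl_webuser')  # placeholder: location of the SMPL Python package

import pickle
import numpy as np
from serialization import load_model  # smpl_webuser/serialization.py

makeObj()                           # random pose and shape
makeObj(path='./saved_params.pkl')  # placeholder pickle holding 'pose' and 'betas'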
Example #2
def run_rec():
    # Output directory for intermediate images (unused in this snippet):
    out_dir = None
    # Now read in the image data. This must be a valid path!
    [X, y] = read_images('images')
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination of feature extraction and classifier:
    my_model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    my_model.compute(X, y)
    # We then save the model, which uses Python's pickle module:
    save_model('model.pkl', my_model)

    model = load_model('model.pkl')
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    #E = []
    #for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    #    e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
    #    E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    #subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()

    im = Image.open('search.png')
    im = im.convert("L")
    # Convert to a numpy array to match how read_images() loads the training data
    predicted_label = model.predict(np.asarray(im, dtype=np.uint8))[0]

    print(predicted_label)
    return predicted_label
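This snippet also depends on imports that were stripped from the example. A hedged sketch of what it likely assumes, following the layout of the facerec library's own examples (module paths are assumptions; check the installed facerec version):

import sys
import logging
import numpy as np
from PIL import Image

from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.serialization import save_model, load_model
from facerec.validation import KFoldCrossValidation
# read_images() is a small helper from the facerec examples that walks a folder
# of per-subject subfolders and returns (images, labels).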
Example #4
import sys
sys.path.insert(0, '/data/Guha/GR/code/GR19/smpl/smpl_webuser')
sys.path.insert(0, '/data/Guha/GR/code/GR19/smpl/models')

import shutil
import os
import numpy as np
import myUtil
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from serialization import load_model
import cv2

############################ Use SMPL python library packages to instantiate SMPL body model ############
## Load SMPL model (here we load the male model)
m1 = load_model('/data/Guha/GR/code/GR19/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m1.betas[:] = np.random.rand(m1.betas.size) * .03

m2 = load_model('/data/Guha/GR/code/GR19/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m2.betas[:] = np.random.rand(m2.betas.size) * .03
## Create OpenDR renderer
rn1 = ColoredRenderer()
rn2 = ColoredRenderer()

## Assign attributes to renderer
w, h = (640, 480)

rn1.camera = ProjectPoints(v=m1, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2.,
                          c=np.array([w, h]) / 2., k=np.zeros(5))
rn1.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn1.set(v=m1, f=m1.f, bgcolor=np.zeros(3))
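The example configures rn1 but stops before producing an image. A minimal sketch of how the render could be lit and displayed, following the standard SMPL/OpenDR hello-world pattern (light position and colors are arbitrary choices, not taken from this script):

## Construct a simple point light source, then show the rendered image
rn1.vc = LambertianPointLight(
    f=m1.f,
    v=rn1.v,
    num_verts=len(m1),
    light_pos=np.array([-1000, -1000, -2000]),
    vc=np.ones_like(m1) * .9,
    light_color=np.array([1., 1., 1.]))

cv2.imshow('SMPL render', rn1.r)  # rn1.r is the rendered float image in [0, 1]
cv2.waitKey(0)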
Example #5
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define SpatialHistogram as the feature extraction method (Fisherfaces left as an alternative):
    feature = SpatialHistogram()
    #feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=NormalizedCorrelation(), k=1)
    #classifier = SVM()
    # Define the model as the combination of feature extraction and classifier:
    my_model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the model on the given data (in X) and labels (in y):
    my_model.compute(X, y)
    # We then save the model, which uses Python's pickle module:
    save_model('modelTry.pkl', my_model)
    model = load_model('modelTry.pkl')
    # Eigenvector visualization only applies to subspace features like Fisherfaces;
    # SpatialHistogram has no eigenvectors, so the plotting code is left disabled here.
    E = []
    #for i in range(min(model.feature.eigenvectors.shape[1], 16)):
    #    e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
    #    E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    #subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
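Example #5 only swaps the feature and the distance metric relative to Example #2; the extra imports it needs would presumably be (assumed, following the facerec package layout):

from facerec.feature import SpatialHistogram
from facerec.distance import NormalizedCorrelation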

Example #6
        print('mesh saved to: ', filepath)


# -------------- modify this part accordingly -----------------------
dataset_path = '/home/zhixuan/data1/HUMBI/HUMBI_uploaded/Body_81_140'
subject = 82
frame = 1
outmesh_path = './smpl_mesh.obj'
model_path = '../../models/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
# -------------------------------------------------------------------
val = np.loadtxt(
    '{}/subject_{}/body/{:08d}/reconstruction/smpl_parameter.txt'.format(
        dataset_path, subject, frame))
scale = val[0]
trans = val[1:4]
m = load_model(model_path)
m.pose[:] = val[4:76]
m.betas[:] = val[76:]

[v, Jtr] = _verts_core(m.pose,
                       m.v_posed,
                       m.J,
                       m.weights,
                       m.kintree_table,
                       want_Jtr=True,
                       xp=ch)
Jtr = Jtr * scale + trans
v = v * scale + trans

write_simple_obj(mesh_v=v, mesh_f=m.f, filepath=outmesh_path)
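Only the final print of write_simple_obj survives at the top of this example. A plausible sketch of the helper, reusing the .obj writing pattern from Example #1 (the body is an assumption; the name and keyword arguments come from the call above):

def write_simple_obj(mesh_v, mesh_f, filepath):
    # Write vertices and 1-based faces in Wavefront .obj format
    with open(filepath, 'w') as fp:
        for v in mesh_v:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
        for f in mesh_f + 1:  # .obj faces are 1-based
            fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
    print('mesh saved to: ', filepath)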
Example #7
import pandas as pd
import pickle
from fractions import gcd  # note: gcd moved to math.gcd in Python 3.5+ and was removed from fractions in 3.9
import math
import random
import transformations
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from serialization import load_model
import h5py
import cv2

## Load SMPL model (here we load the male model)
m = load_model('models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
import numpy as np
import os

expmtname = 'expname'
dataset_type = 'Final'
# Create the output folder tree (makedirs with exist_ok avoids failures on re-runs)
os.makedirs('VISUALIZATION/' + expmtname + '/INPUT/', exist_ok=True)
os.makedirs('VISUALIZATION/' + expmtname + '/OUTPUT/', exist_ok=True)
os.makedirs('VISUALIZATION/' + expmtname + '/TOGETHER/', exist_ok=True)

gt = pickle.load(open('dataset/test.p', "rb"))

pred = pickle.load(open('results/preds.p', "rb"))
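The script stops after loading the ground-truth and predicted parameters. A hedged sketch of how the predictions might drive the SMPL model and be dumped frame by frame into the OUTPUT folder, reusing the .obj pattern from Example #1 (it assumes pred is an iterable of 72-dimensional SMPL pose vectors, which the snippet does not state):

for i, pose in enumerate(pred):
    m.pose[:] = np.asarray(pose).reshape(-1)  # assumed 72-dim axis-angle pose
    out_path = 'VISUALIZATION/{}/OUTPUT/{:05d}.obj'.format(expmtname, i)
    with open(out_path, 'w') as fp:
        for v in m.r:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
        for f in m.f + 1:
            fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))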
Example #8
def run(results_root, split, frame_nb, frame_start, z_min, z_max, texture_zoom,
        texture_type, render_body, background_datasets, ambiant_mean,
        ambiant_add, grasp_folder, grasp_split_path, min_obj_ratio, _config,
        obj_tex_datasets, random_obj_textures, grasp_nb, lsun_path,
        smpl_data_path, smpl_model_path, mano_right_path, shapenet_root,
        imagenet_path):
    print(_config)
    scene = bpy.data.scenes['Scene']
    # Clear default scene cube
    bpy.ops.object.delete()

    # Set results folders
    folder_meta = os.path.join(results_root, 'meta')
    folder_rgb = os.path.join(results_root, 'rgb')
    folder_segm = os.path.join(results_root, 'segm')
    folder_temp_segm = os.path.join(results_root, 'tmp_segm')
    folder_depth = os.path.join(results_root, 'depth')
    folders = [
        folder_meta, folder_rgb, folder_segm, folder_temp_segm, folder_depth
    ]
    folder_rgb_hand = os.path.join(results_root, 'rgb_hand')
    folder_rgb_obj = os.path.join(results_root, 'rgb_obj')
    folder_depth_hand = os.path.join(results_root, 'depth_hand')
    folder_depth_obj = os.path.join(results_root, 'depth_obj')
    folders.extend([folder_rgb_hand, folder_rgb_obj])

    # Create results directories
    for folder in folders:
        os.makedirs(folder, exist_ok=True)

    # Load smpl2mano correspondences
    right_smpl2mano = np.load('assets/models/smpl2righthand_verts.npy')

    # Load SMPL+H model and grasp infos
    ncomps = 45
    grasp_info = read_grasp_folder(
        grasp_folder=grasp_folder,
        shapenet_root=shapenet_root,
        split_path=grasp_split_path,
        split=split,
        filter_angle=94,
        grasp_nb=grasp_nb,
        mano_path=mano_right_path,
        obj_models='shapenet',
        use_cache=True)

    print('Loaded grasp info for {} grasps'.format(len(grasp_info)))
    smplh_model = smplh_load_model(
        smpl_model_path, ncomps=2 * ncomps, flat_hand_mean=True)
    mano_model = load_model(mano_right_path)
    mano_mesh = bpy.data.meshes.new('Mano')
    mano_mesh.from_pydata(list(np.array(mano_model.r)), [], list(mano_model.f))
    mano_obj = bpy.data.objects.new('Mano', mano_mesh)
    bpy.context.scene.objects.link(mano_obj)
    mano_obj.hide_render = True

    print('Loaded mano model')
    camutils.set_camera()

    backgrounds = imageutils.get_image_paths(
        background_datasets, split=split, lsun_path=lsun_path,
        imagenet_path=imagenet_path)
    print('Got {} backgrounds'.format(len(backgrounds)))

    # Get full body textures
    body_textures = imageutils.get_bodytexture_paths(
        texture_type, split=split, lsun_path=lsun_path, imagenet_path=imagenet_path)
    print('Got {} body textures'.format(len(body_textures)))

    obj_textures = imageutils.get_image_paths(
        obj_tex_datasets,
        split=split,
        shapenet_folder=shapenet_root,
        lsun_path=lsun_path,
        imagenet_path=imagenet_path)
    print('Got {} object textures'.format(len(obj_textures)))

    print('Finished loading textures')

    # Load smpl info
    smpl_data = np.load(smpl_data_path)

    smplh_verts, faces = smplh_model.r, smplh_model.f
    smplh_obj = mesh_manip.load_smpl()
    # Smooth the edges of the body model
    bpy.ops.object.shade_smooth()

    # Set camera rendering params
    scene.render.resolution_x = 256
    scene.render.resolution_y = 256
    scene.render.resolution_percentage = 100

    # Get camera info
    cam_calib = np.array(camutils.get_calib_matrix())
    cam_extr = np.array(camutils.get_extrinsic())

    scs, materials, sh_path = texturing.initialize_texture(
        smplh_obj, texture_zoom=texture_zoom, tmp_suffix='tmp')

    print('Starting loop!')

    for i in range(frame_nb):
        frame_idx = i + frame_start
        np.random.seed(frame_idx)
        random.seed(frame_idx)
        tmp_files = []  # Keep track of temporary files to delete at the end

        grasp = random.choice(grasp_info)

        if 'mano_trans' in grasp:
            mano_model.trans[:] = [val for val in grasp['mano_trans']]
        else:
            mano_model.trans[:] = grasp['hand_trans']
        mano_model.pose[:] = grasp['hand_pose']
        mesh_manip.alter_mesh(mano_obj, mano_model.r.tolist())

        smplh_verts, posed_model, meta_info = mesh_manip.randomized_verts(
            smplh_model,
            smpl_data,
            ncomps=2 * ncomps,
            z_min=z_min,
            z_max=z_max,
            side='right',
            hand_pose=grasp['pca_pose'],
            hand_pose_offset=0,
            random_shape=False,
            random_pose=True,
            split=split)

        # Center mesh on center_idx
        mesh_manip.alter_mesh(smplh_obj, smplh_verts.tolist())

        # Load object
        obj_path = grasp['obj_path']
        obj = load_obj_model(obj_path)
        obj_scale = float(grasp['sample_scale']) / 1000
        obj.scale = (obj_scale, obj_scale, obj_scale)
        obj.rotation_euler = (0, 0, 0)
        bpy.ops.object.shade_smooth()

        model_name = obj.name
        obj_mesh = bpy.data.meshes[model_name]
        obj_scs = []

        # Create object material if none is present
        materials_tmp = []
        if len(obj_mesh.materials) == 0:
            mat = bpy.data.materials.new(name='{}_mat'.format(obj_mesh.name))
            bpy.ops.object.material_slot_add()
            obj.material_slots[0].material = mat
        for mat_idx, obj_mat in enumerate(obj_mesh.materials):
            materials_tmp.append(obj_mat)
            if random_obj_textures:
                obj_texture = random.choice(obj_textures)
                generated_uv = True
            else:
                obj_texture = os.path.join(
                    os.path.dirname(obj_path), 'texture.jpg')
                generated_uv = False
            obj_sh_path = texturing.add_obj_texture(
                obj_mat,
                obj_texture,
                sh_path,
                down_scale=texture_zoom,
                tmp_suffix='tmp',
                generated_uv=generated_uv)
            tmp_files.append(obj_sh_path)
            tmp_files.append(obj_sh_path.replace('.osl', '.oso'))
            obj_scs.append(obj_mat.node_tree.nodes['Script'])
            obj_scs[-1].update()

        # Apply transform to object
        rigid_transform = coordutils.get_rigid_transform_posed_mano(
            posed_model, mano_model)
        mano_obj.matrix_world = Matrix(rigid_transform)

        obj_transform = rigid_transform.dot(obj.matrix_world)
        obj.matrix_world = Matrix(obj_transform)
        obj.scale = (obj_scale, obj_scale, obj_scale)

        hand_info = coordutils.get_hand_body_info(
            posed_model,
            render_body=render_body,
            side='right',
            cam_extr=cam_extr,
            cam_calib=cam_calib,
            right_smpl2mano=right_smpl2mano)
        frame_prefix = '{:08}'.format(frame_idx)

        # Save object info
        hand_info['affine_transform'] = obj_transform.astype(np.float32)
        if random_obj_textures:
            hand_info['obj_texture'] = obj_texture
        hand_info['obj_path'] = obj_path
        hand_info['obj_scale'] = obj_scale

        # Save grasp info
        for label in [
                'sample_id', 'class_id', 'pca_pose', 'grasp_quality',
                'grasp_epsilon', 'grasp_volume', 'hand_trans',
                'hand_global_rot', 'hand_pose'
        ]:
            hand_info[label] = grasp[label]

        hand_infos = {**hand_info, **meta_info}

        camutils.set_camera()
        camera_name = 'Camera'
        # Randomly pick background
        bg_path = random.choice(backgrounds)

        # Setup depth and segmentation rendering
        depth_path = os.path.join(folder_depth, frame_prefix)
        tmp_segm_path = render.set_cycle_nodes(
            scene, bg_path, segm_path=folder_temp_segm, depth_path=depth_path)
        tmp_files.append(tmp_segm_path)
        tmp_depth = depth_path + '{:04d}.exr'.format(1)
        tmp_files.append(tmp_depth)
        # Randomly pick clothing texture
        tex_path = random.choice(body_textures)

        # Spherical harmonic lighting
        sh_coeffs = texturing.get_sh_coeffs(
            ambiant_mean=ambiant_mean, ambiant_max_add=ambiant_add)
        texturing.set_sh_coeffs(scs, sh_coeffs)
        texturing.set_sh_coeffs(obj_scs, sh_coeffs)

        # Update body+hands image
        tex_img = bpy.data.images.load(tex_path)
        for part, material in materials.items():
            material.node_tree.nodes['Image Texture'].image = tex_img

        # Render
        img_path = os.path.join(folder_rgb, '{}.jpg'.format(frame_prefix))
        scene.render.filepath = img_path
        scene.render.image_settings.file_format = 'JPEG'
        bpy.ops.render.render(write_still=True)

        # Render obj only
        obj_img_path = os.path.join(folder_rgb_obj,
                                    '{}.jpg'.format(frame_prefix))
        smplh_obj.hide_render = True
        scene.render.filepath = obj_img_path
        obj_depth_path = os.path.join(folder_depth_obj, frame_prefix)
        tmp_segm_obj_path = render.set_cycle_nodes(
            scene,
            bg_path,
            segm_path=folder_temp_segm,
            depth_path=obj_depth_path)
        tmp_obj_depth = obj_depth_path + '{:04d}.exr'.format(1)
        tmp_files.append(tmp_obj_depth)
        tmp_files.append(tmp_segm_obj_path)
        bpy.ops.render.render(write_still=True)

        # Render hand only
        hand_img_path = os.path.join(folder_rgb_hand,
                                     '{}.jpg'.format(frame_prefix))
        smplh_obj.hide_render = False
        obj.hide_render = True
        scene.render.filepath = hand_img_path
        hand_depth_path = os.path.join(folder_depth_hand, frame_prefix)
        tmp_segm_hand_path = render.set_cycle_nodes(
            scene,
            bg_path,
            segm_path=folder_temp_segm,
            depth_path=hand_depth_path)

        tmp_hand_depth = hand_depth_path + '{:04d}.exr'.format(1)
        tmp_files.append(tmp_hand_depth)
        tmp_files.append(tmp_segm_hand_path)
        bpy.ops.render.render(write_still=True)

        # Delete objects
        delete_obj_model(obj)

        camutils.check_camera(camera_name=camera_name)
        segm_img = cv2.imread(tmp_segm_path)[:, :, 0]
        if render_body:
            keep_render = True
        else:
            keep_render = conditions.segm_condition(
                segm_img, side='right', use_grasps=True)
        depth, depth_min, depth_max = depthutils.convert_depth(tmp_depth)

        hand_infos['depth_min'] = depth_min
        hand_infos['depth_max'] = depth_max
        hand_infos['bg_path'] = bg_path
        hand_infos['sh_coeffs'] = sh_coeffs
        hand_infos['body_tex'] = tex_path
        # Concatenate depth as rgb
        hand_depth, hand_depth_min, hand_depth_max = depthutils.convert_depth(
            tmp_hand_depth)
        obj_depth, obj_depth_min, obj_depth_max = depthutils.convert_depth(
            tmp_obj_depth)
        depth = np.stack([depth, hand_depth, obj_depth], axis=2)
        hand_infos['hand_depth_min'] = hand_depth_min
        hand_infos['hand_depth_max'] = hand_depth_max
        hand_infos['obj_depth_min'] = obj_depth_min
        hand_infos['obj_depth_max'] = obj_depth_max

        # Concatenate segm as rgb
        obj_segm = cv2.imread(tmp_segm_obj_path)[:, :, 0]
        hand_segm = cv2.imread(tmp_segm_hand_path)[:, :, 0]
        keep_render_obj, obj_ratio = conditions.segm_obj_condition(
            segm_img, obj_segm, min_obj_ratio=min_obj_ratio)
        keep_render = keep_render and keep_render_obj
        hand_infos['obj_visibility_ratio'] = obj_ratio
        segm_img = np.stack([segm_img, hand_segm, obj_segm], axis=2)

        # Clean residual files
        if keep_render:
            # Write depth image
            final_depth_path = os.path.join(folder_depth,
                                            '{}.png'.format(frame_prefix))
            cv2.imwrite(final_depth_path, depth)

            # Save meta
            meta_pkl_path = os.path.join(folder_meta,
                                         '{}.pkl'.format(frame_prefix))
            with open(meta_pkl_path, 'wb') as meta_f:
                pickle.dump(hand_infos, meta_f)

            # Write segmentation path
            segm_save_path = os.path.join(folder_segm,
                                          '{}.png'.format(frame_prefix))
            cv2.imwrite(segm_save_path, segm_img)
            ex.log_scalar('generated.idx', frame_idx)
        else:
            os.remove(img_path)
            os.remove(obj_img_path)
            os.remove(hand_img_path)
        for filepath in tmp_files:
            os.remove(filepath)

        # Remove materials
        for material in materials_tmp:
            material.user_clear()
            bpy.data.materials.remove(material)
    print('DONE')
Example #9
import sys
sys.path.insert(0, '/data/Guha/GR/code/GR19/smpl/smpl_webuser')
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from serialization import load_model
import os
import myUtil
import cv2
import matplotlib.pyplot as plt
import itertools

#################################### Load SMPL model (here we load the male model) ####################################
m1 = load_model('../models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m1.betas[:] = np.random.rand(m1.betas.size) * .03

m2 = load_model('../models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m2.betas[:] = np.random.rand(m2.betas.size) * .03
## Create OpenDR renderer
rn1 = ColoredRenderer()
rn2 = ColoredRenderer()

## Assign attributes to renderer
w, h = (640, 480)

rn1.camera = ProjectPoints(v=m1,
                           rt=np.zeros(3),
                           t=np.array([0, 0, 2.]),
                           f=np.array([w, w]) / 2.,
                           c=np.array([w, h]) / 2.,
                           k=np.zeros(5))
Example #10
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination of feature extraction and classifier:
    my_model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    my_model.compute(X, y)
    # We then save the model, which uses Python's pickle module:
    save_model("model.pkl", my_model)
    model = load_model("model.pkl")
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in range(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
        E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    subplot(
        title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png"
    )
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
Example #11
def load_model_file(model_filename):
    model = load_model(model_filename)
    return model
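A one-line usage sketch, reusing the model path from the earlier SMPL examples:

m = load_model_file('../models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
print(m.r.shape)  # posed SMPL vertices, (6890, 3)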