def test_export_image_jpg(mock_open, exists, PILImage):
    exists.return_value = False
    fake_path = '/fake/fake.jpg'
    with open(fake_path) as f:
        type(f).name = PropertyMock(return_value=fake_path)
        mio.export_image(test_img, f, extension='jpg')
    PILImage.save.assert_called_once()
def img_pre(path_to_images, save_dir=None, propotion=0.2, scale=400.0, greyscale=False):
    import menpo.io as mio
    from menpo.visualize import print_progress
    if save_dir is not None:
        mk_dir(save_dir, 0)
    for image_path in path_to_images:
        for img in print_progress(mio.import_images(image_path, verbose=True)):
            if greyscale:
                # convert to greyscale
                if img.n_channels == 3:
                    img = img.as_greyscale()
            # crop to landmarks bounding box with an extra 20% padding
            re_img = img.crop_to_landmarks_proportion(propotion)
            # # rescale image if its diagonal is bigger than 400 pixels
            # d = img.diagonal()
            # if d > scale:
            #     img = img.rescale(scale / d)
            # save enhanced image with label
            img_suffix = img.path.suffix
            lb_suffix = '.pts'
            new_image_name = '%s' % img.path.name.split('.')[0]
            img_path = os.path.join(save_dir, new_image_name + img_suffix)
            lb_path = os.path.join(save_dir, new_image_name + lb_suffix)
            mio.export_image(re_img, img_path, overwrite=True)
            mio.export_landmark_file(re_img.landmarks['PTS'], lb_path, overwrite=True)
def test_export_image_jpg(mock_open, exists, PILImage):
    exists.return_value = False
    fake_path = '/fake/fake.jpg'
    with open(fake_path) as f:
        type(f).name = PropertyMock(return_value=fake_path)
        mio.export_image(test_img, f, extension='jpg')
    PILImage.save.assert_called_once()
def warp_landmarked_image_folder(top_dir, template, detector, ext='.jpg'):
    """Finds all images with associated .pts landmark files and performs warping on them."""
    mask = menpo.image.BooleanImage.init_from_pointcloud(template)
    warpeds = []
    shapes = []
    labels = []
    folders = glob.glob(os.path.join(top_dir, '*'))
    new_folders = [
        folder.replace(imset, '{}-warped'.format(imset)) for folder in folders
    ]
    for label, folder in enumerate(tqdm(folders)):
        os.makedirs(new_folders[label], exist_ok=True)
        for im_fn in tqdm(glob.glob('{}/*{}'.format(folder, ext))):
            try:
                image = mio.import_image(im_fn)
                shape = image.landmarks['PTS']
            except:
                continue
            bboxes = detector(image)
            if len(bboxes) < 1:
                print('no face found in {}'.format(im_fn))
                continue
            min_b, max_b = bboxes[0].bounds()
            cropped = image.crop(min_b, max_b)
            new_fn = im_fn.replace(imset, '{}-warped'.format(imset))
            transform = menpo.transform.AlignmentAffine(shape, template)
            warped = cropped.warp_to_mask(mask, transform)
            mio.export_image(warped, new_fn, overwrite=True)
            warpeds.append(warped.pixels)
            shapes.append(shape)
            labels.append(label)
    return warpeds, shapes, labels
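# A minimal usage sketch for warp_landmarked_image_folder above. The paths, the `imset`
# module-level global, the template landmark file and the dlib detector are assumptions,
# not part of the original snippet.
import menpo.io as mio
from menpodetect import load_dlib_frontal_face_detector

imset = 'faces'  # assumed: the folder-name token the function replaces with 'faces-warped'
# older menpo returns a LandmarkGroup (hence .lms); newer versions return the PointCloud directly
template = mio.import_landmark_file('/data/template.pts').lms
detector = load_dlib_frontal_face_detector()
warpeds, shapes, labels = warp_landmarked_image_folder('/data/faces', template, detector)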
def test_export_image_jpg(mock_open, exists, PILImage):
    exists.return_value = False
    fake_path = '/fake/fake.jpg'
    with open(fake_path) as f:
        type(f).name = PropertyMock(return_value=fake_path)
        mio.export_image(test_img, f, extension='jpg')
    assert PILImage.fromarray.return_value.save.call_count == 1
def crop_face():
    filePathSimple = 'NAME_OF_THE_DIRECTORY'  # placeholder: directory containing the videos
    files = os.listdir(filePathSimple)
    filePath = 'PATH_OF_THE_VIDEOS'           # placeholder: path of the videos
    vids = import_videos(filePathSimple)
    detector = load_ffld2_frontal_face_detector()
    for utterance in print_progress(vids):  # vids is a list with all videos
        fileName = files.pop(0)
        whole_path_of_video = filePath + "/" + fileName
        path_where_you_want_the_uncropped_images_to_be_stored = fileName + "_" + "uncropped"
        same_as_before_with_cropped_images = fileName + "_" + "cropped"
        # cropped_image_location = fileName + "_" + "cropped2"
        cropped_image_location = same_as_before_with_cropped_images
        print(fileName + " Cropping")
        fps_check = subprocess.check_output('ffprobe ' + whole_path_of_video + ' 2>&1 | grep fps',
                                            shell=True)
        fps_check = str(fps_check, 'utf-8')
        # fps = float(fps_check.split(' fps')[0].split(',')[-1][1:])
        # in our case we know fps=30, so you can just put this
        fps = 30
        if not os.path.exists(path_where_you_want_the_uncropped_images_to_be_stored):
            os.makedirs(path_where_you_want_the_uncropped_images_to_be_stored)
        if not os.path.exists(same_as_before_with_cropped_images):
            os.makedirs(same_as_before_with_cropped_images)
        subprocess.call('ffmpeg -loglevel panic -i ' + whole_path_of_video + ' -vf fps=' +
                        str(fps) + ' ' + path_where_you_want_the_uncropped_images_to_be_stored +
                        '/%05d.jpg', shell=True)
        vv = mio.import_images(path_where_you_want_the_uncropped_images_to_be_stored + '/*.jpg')
        for cnt, im in enumerate(vv):
            name = '{0:05d}'.format(cnt + 1)  # image names start from 1, not 0
            lns = detector(im)
            if im.landmarks.n_groups == 0:
                # there are no detections
                continue
            if im.landmarks.n_groups == 1:
                im.constrain_landmarks_to_bounds()
                mio.export_image(im.crop_to_landmarks(),
                                 cropped_image_location + '/' + name + '.jpg',
                                 extension=None, overwrite=True)
            elif im.landmarks.n_groups > 1:
                for i in range(im.landmarks.n_groups):
                    im.constrain_landmarks_to_bounds()
                    mio.export_image(im.crop_to_landmarks(group='ffld2_' + str(i)),
                                     cropped_image_location + '/' + name + '_' + str(i) + '.jpg',
                                     extension=None, overwrite=True)
        subprocess.call("mv " + filePathSimple + "/" + fileName + " " + filePathSimple + "/Finished",
                        shell=True)
        print(fileName + " Finished")
    print("All Done")
def save_images_to_dir(images, out_path, output_ext='.jpg'):
    from menpo.visualize import print_progress
    import menpo.io as mio
    if not out_path.exists():
        out_path.mkdir()
    for k, im in enumerate(print_progress(images, prefix='Saving images to disk')):
        mio.export_image(im, out_path / '{}{}'.format(k, output_ext))
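# Hedged usage sketch for save_images_to_dir: `out_path` must be a pathlib.Path (the function
# calls .exists(), .mkdir() and the / operator on it). The input folder is an assumption.
from pathlib import Path
import menpo.io as mio

images = list(mio.import_images('/data/faces', verbose=True))
save_images_to_dir(images, Path('/data/faces-jpg'))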
def aux_export_image_loop(base_p, frames, clip_name, iter1=10):
    # exports the same image a number of times (used for testing).
    from menpo.io import import_builtin_asset, export_image
    im = import_builtin_asset.lenna_png()
    from utils import mkdir_p
    p1 = mkdir_p(join(base_p, frames, clip_name, ''))
    for i in range(iter1):
        export_image(im, p1 + 'lenna_' + str(i) + '.png')
def get_jpg_string(im):
    # Gets the serialized jpg from a menpo `Image`.
    if not isinstance(im, Image):
        im = Image.init_from_channels_at_back(im)
    fp = BytesIO()
    mio.export_image(im, fp, extension='jpg')
    fp.seek(0)
    return fp.read()
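# Small usage sketch for get_jpg_string, assuming the menpo built-in Lenna asset; the
# function also accepts a raw channels-at-back array thanks to the isinstance guard above.
import menpo.io as mio

jpg_bytes = get_jpg_string(mio.import_builtin_asset.lenna_png())
print(len(jpg_bytes), jpg_bytes[:2] == b'\xff\xd8')  # starts with the JPEG SOI marker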
def jpg_feature(image):
    """Serialise a menpo image to JPEG bytes and wrap them as a bytes feature."""
    fp = BytesIO()
    mio.export_image(image, fp, extension='jpg')
    fp.seek(0)
    jpg_bytes = fp.read()
    return bytes_feature(jpg_bytes)
def fit_image_folder(imset, fitter, detector, top_dir, save_pts=False, save_warped=False,
                     ext='.jpg'):
    """
    Fit an ATM/AAM to a directory of image folders and return:
        warpeds: the raw shape-normalized warped images
        appearances: the PCs of the shape-normalized warped images
        shapes: the locations of each landmark per image
        labels: the ID determined by the subfolder the image was found in
    """
    warpeds = []
    appearances = []
    shapes = []
    labels = []
    folders = glob.glob(os.path.join(top_dir, '*'))
    new_folders = [
        folder.replace(imset, '{}-aligned'.format(imset)) for folder in folders
    ]
    for label, folder in enumerate(tqdm(folders)):
        os.makedirs(new_folders[label], exist_ok=True)
        for im_fn in tqdm(glob.glob('{}/*{}'.format(folder, ext))):
            try:
                image = mio.import_image(im_fn)
            except:
                continue
            new_fn = im_fn.replace(imset, '{}-aligned'.format(imset))
            bboxes = detector(image)
            if len(bboxes) < 1:
                print('no face found in {}'.format(im_fn))
                continue
            result = fitter.fit_from_bb(image, bboxes[0], max_iters=20)
            warped = fitter.warped_images(image, [fitter.reference_shape])
            if save_pts:
                save_pointcloud_as_landmark(Path(im_fn), result.final_shape)
            if save_warped:
                try:
                    mio.export_image(menpo.image.Image(warped[0]), new_fn, overwrite=True)
                except:
                    mio.export_image(warped[0], new_fn, overwrite=True)
            try:
                warpeds.append(warped[0].as_imageio())
            except:
                warpeds.append(warped[0])
            if hasattr(result, 'appearance_parameters'):
                appearances.append(result.appearance_parameters[-1])
            shapes.append(result.final_shape)
            labels.append(label)
    return warpeds, appearances, shapes, labels
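# Hedged usage sketch for fit_image_folder. The pickled pre-trained AAM fitter, the dlib
# detector and the directory layout are assumptions, not part of the original snippet.
import menpo.io as mio
from menpodetect import load_dlib_frontal_face_detector

fitter = mio.import_pickle('/models/pretrained_aam_fitter.pkl')  # hypothetical pre-trained fitter
detector = load_dlib_frontal_face_detector()
warpeds, appearances, shapes, labels = fit_image_folder(
    'faces', fitter, detector, '/data/faces', save_pts=True, save_warped=True)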
def createMeanAndStd(filenames, meanname, stdname):
    global debug
    # mean_image = np.zeros((1, 110, 110), dtype=np.float32)
    # std_image = np.zeros((1, 110, 110), dtype=np.float32)
    sum_mean_images = np.zeros((1, 128, 128), dtype=np.float32)
    sum_std_images = np.zeros((1, 128, 128), dtype=np.float32)
    len_total_data_images = 0
    print("Loading mean ...")
    for filename in filenames:
        print(filename)
        f = h5py.File(filename, 'r')
        # Get the data
        data = {}
        for key in list(f.keys()):
            data[key] = np.asarray(list(f[key]))
        if debug:
            print()
        for image in data["images"]:
            sum_mean_images = sum_mean_images + image
        len_total_data_images += len(data["images"])
    mean_images = np.array(sum_mean_images) / len_total_data_images
    del sum_mean_images
    print("Done")
    print("Loading std ...")
    for filename in filenames:
        print(filename)
        f = h5py.File(filename, 'r')
        # Get the data
        data = {}
        for key in list(f.keys()):
            data[key] = np.asarray(list(f[key]))
        if debug:
            print(len(data["images"]))
        for image in data["images"]:
            sum_std_images = sum_std_images + np.power((image - mean_images), 2)
    std_images = np.array(sum_std_images) / len_total_data_images
    std_images = np.sqrt(std_images)
    del sum_std_images
    print("Done")
    print("Exporting...")
    mio.export_image(mi.Image(mean_images), meanname)
    mio.export_image(mi.Image(std_images), stdname)
    print("Done")
def transform_pts_temp_v2(image_dir_path, db_name, type_name='trainval'):
    import menpo.io as mio
    from menpo.visualize import print_progress
    img_dir, lb_dir = mk_img_lb_dir(db_name, type_name)
    impaths = filter(lambda x: 'pts' not in x, os.listdir(image_dir_path))
    for imgpath in print_progress(impaths):
        imgpath = os.path.join(image_dir_path, imgpath)
        newpath = transform_impath(imgpath)
        img = mio.import_image(newpath)
        img_suffix = img.path.suffix
        lb_suffix = '.txt'
        image_name = img.path.name.split('.')[0]
        image_type = img.path._str.split('/')[11]
        image_name = '%s_%s' % (image_type, image_name)
        dataType = filter(lambda x: x == image_type, support_types)[0]
        if dataType == 'afw':
            image_name_list = image_name.split('_')
            image_name_list.pop(2)  # remove index
            image_name = '_'.join(image_name_list)
        img_path = os.path.join(img_dir, image_name + img_suffix)
        lb_path = os.path.join(lb_dir, image_name + lb_suffix)
        # save image
        mio.export_image(img, img_path, overwrite=True)
        # save label
        lb = ['None'] * 166
        kps = [str(kp) for kp in (img.landmarks['PTS'].as_vector() + 1)]
        xy = img.landmarks['PTS'].bounds()[0][::-1]
        zk = img.landmarks['PTS'].bounds()[1][::-1]
        x = str(xy[0])
        y = str(xy[1])
        w = str(zk[0] - xy[0])
        h = str(zk[1] - xy[1])
        box_hxyw = [h] + [x] + [y] + [w]
        kps_t = np.array(kps[0::2])
        kps[0::2] = kps[1::2]
        kps[1::2] = kps_t
        lb[30:] = kps
        lb[25:29] = box_hxyw
        with open(lb_path, 'a') as f:
            f.write(' '.join(lb) + '\n')
        fi = FaceImage(img_path)
        # fi.vis()
        pass
def transform_pts(image_dir_path, db_name, type_name='trainval', box_c=0):
    import menpo.io as mio
    from menpo.visualize import print_progress
    img_dir, lb_dir = mk_img_lb_dir(db_name, type_name)
    for img in print_progress(mio.import_images(image_dir_path, verbose=True)):
        img_suffix = img.path.suffix
        lb_suffix = '.txt'
        image_name = img.path.name.split('.')[0]
        img_path = os.path.join(img_dir, image_name + img_suffix)
        lb_path = os.path.join(lb_dir, image_name + lb_suffix)
        # save image
        mio.export_image(img, img_path, overwrite=True)
        # save label
        lb = ['None'] * (30 + img.landmarks['PTS'].n_points * 2)
        kps = [str(kp) for kp in img.landmarks['PTS'].as_vector() + 1]
        if box_c:
            pickle_path = img.path._str.split('.')[0] + '.pkl'
            with open(pickle_path, 'rb') as f:
                box = pickle.load(f)['box']
            x = str(box[0])
            y = str(box[1])
            w = str(box[2] - box[0])
            h = str(box[3] - box[1])
            box_hxyw = [h] + [x] + [y] + [w]
        else:
            xy = img.landmarks['PTS'].bounds()[0][::-1] + 1
            zk = img.landmarks['PTS'].bounds()[1][::-1] + 1
            x = str(xy[0])
            y = str(xy[1])
            w = str(zk[0] - xy[0])
            h = str(zk[1] - xy[1])
            box_hxyw = [h] + [x] + [y] + [w]
        kps_t = np.array(kps[0::2])
        kps[0::2] = kps[1::2]
        kps[1::2] = kps_t
        lb[30:] = kps
        lb[25:29] = box_hxyw
        with open(lb_path, 'w') as f:
            f.write(' '.join(lb))
        fi = FaceImage(img_path)
        # fi.vis()
        pass
def export_landmark_visualization(r, id_, img):
    mio.export_image(img, path_landmark_visualization(r, id_), overwrite=True)
def _build_shape_desc(sd_path_in, _norm_imgs, target_shape, aligned_shapes, align_t,
                      reference_frame, _icp_transform, _is_mc=False, group=None,
                      target_align_shape=None, _shape_desc=svs_shape, align_group='align',
                      target_group=None):
    sd_path_in = '{}'.format(sd_path_in)
    if not os.path.exists(sd_path_in):
        os.makedirs(sd_path_in)
    # Build Transform Using SVS
    xr, yr = reference_frame.shape
    # Draw Mask
    # mask_shape = mask_pc(align_t.apply(target_shape))
    # mask_image = Image.init_blank((xr, yr))
    # for pts in mask_shape.points:
    #     mask_image.pixels[0, pts[0], pts[1]] = 1
    # mio.export_image(
    #     mask_image,
    #     '{}/ref_mask.png'.format(sd_path_in),
    #     overwrite=True
    # )
    if (not glob.glob(sd_path_in + '/sd_*.gif')):
        target_group = target_group if not target_group is None else [
            range(target_shape.n_points)
        ]
        for j, (a_s, tr, svsLms, groups) in enumerate(
                zip([target_shape] + aligned_shapes.tolist(),
                    [AlignmentSimilarity(target_shape, target_shape)] + _icp_transform,
                    [target_align_shape] + [ni.landmarks[align_group].lms for ni in _norm_imgs],
                    [target_group] + [group_from_labels(ni.landmarks[group])
                                      for ni in _norm_imgs])):
            print_dynamic(" - Shape Descriptor Training {} out of {}".format(
                j, len(aligned_shapes) + 1))
            # Align shapes with reference frame
            temp_as = align_t.apply(a_s)
            points = temp_as.points
            # Store SVS Landmarks
            svsLmsPath = '{}/sd_{:04d}_lms.pts'.format(sd_path_in, j)
            svsLms = align_t.apply(tr.apply(svsLms))
            if not os.path.exists(svsLmsPath):
                tempRef = reference_frame.copy()
                tempRef.landmarks['temp'] = svsLms
                mio.export_landmark_file(tempRef.landmarks['temp'], svsLmsPath)
            store_image = normalise_image(_shape_desc(temp_as, xr, yr, groups))
            # Create gif from svs group:
            #   convert -delay 10 -loop 0 sd_0001_g*.png test.gif
            for ch in range(store_image.n_channels):
                channel_img = store_image.extract_channels(ch)
                mio.export_image(channel_img,
                                 '{}/sd_{:04d}_g{:02d}.png'.format(sd_path_in, j, ch),
                                 overwrite=True)
            subprocess.Popen([
                'convert', '-delay', '10', '-loop', '0',
                '{0}/sd_{1:04d}_g*.png'.format(sd_path_in, j),
                '{0}/sd_{1:04d}.gif'.format(sd_path_in, j)
            ])
def test_export_image_jpg(mock_open, exists, PILImage):
    exists.return_value = False
    with open('/tmp/test.jpg') as f:
        type(f).name = PropertyMock(return_value='/tmp/test.jpg')
        mio.export_image(test_img, f, extension='jpg')
    PILImage.save.assert_called_once()
def generate_interpolation_images(run_id, snapshot=None, grid_size=[1, 1], image_shrink=1,
                                  image_zoom=1, duration_sec=60.0, smoothing_sec=1.0,
                                  mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M',
                                  random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(
        all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    lsfm_model = m3io.import_lsfm_model(
        '/home/baris/Projects/faceganhd/models/all_all_all.mat')
    lsfm_tcoords = \
        mio.import_pickle('/home/baris/Projects/team members/stelios/UV_spaces_V2/UV_dicts/full_face/512_UV_dict.pkl')[
            'tcoords']
    lsfm_params = []
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(int(num_frames / minibatch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' % (png_idx * minibatch_size,
                                                    (png_idx + 1) * minibatch_size, num_frames),
              end='')
        latents = all_latents[png_idx * minibatch_size:(png_idx + 1) * minibatch_size, 0]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus, out_shrink=image_shrink)
        for i in range(minibatch_size):
            texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
            mesh_raw = from_UV_2_3D(Image(images[i, 3:6]))
            normals = images[i, 6:9]
            normals_norm = (normals - normals.min()) / (normals.max() - normals.min())
            mesh = lsfm_model.reconstruct(mesh_raw)
            lsfm_params.append(lsfm_model.project(mesh_raw))
            t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points, texture, mesh.trilist)
            m3io.export_textured_mesh(
                t_mesh, os.path.join(result_subdir, '%06d.obj' % (png_idx * minibatch_size + i)),
                texture_extension='.png')
            mio.export_image(
                Image(normals_norm),
                os.path.join(result_subdir, '%06d_nor.png' % (png_idx * minibatch_size + i)))
        print('%0.2f seconds' % (time.time() - start))

    mio.export_pickle(lsfm_params, os.path.join(result_subdir, 'lsfm_params.pkl'))
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def generate_fake_images(run_id, snapshot=None, grid_size=[1, 1], batch_size=8, num_pngs=1,
                         image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    lsfm_model = m3io.import_lsfm_model(
        '/home/baris/Projects/faceganhd/models/all_all_all.mat')
    lsfm_tcoords = \
        mio.import_pickle('/home/baris/Projects/team members/stelios/UV_spaces_V2/UV_dicts/full_face/512_UV_dict.pkl')[
            'tcoords']
    lsfm_params = []
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(int(num_pngs / batch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' % (png_idx * batch_size,
                                                    (png_idx + 1) * batch_size, num_pngs),
              end='')
        latents = misc.random_latents(np.prod(grid_size) * batch_size, Gs,
                                      random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus, out_shrink=image_shrink)
        for i in range(batch_size):
            if images.shape[1] == 3:
                mio.export_pickle(
                    images[i],
                    os.path.join(result_subdir,
                                 '%s%06d.pkl' % (png_prefix, png_idx * batch_size + i)))
                # misc.save_image(images[i], os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx*batch_size+i)), [0,255], grid_size)
            elif images.shape[1] == 6:
                mio.export_pickle(images[i][3:6],
                                  os.path.join(result_subdir,
                                               '%s%06d.pkl' % (png_prefix, png_idx * batch_size + i)),
                                  overwrite=True)
                misc.save_image(images[i][0:3],
                                os.path.join(result_subdir,
                                             '%s%06d.png' % (png_prefix, png_idx * batch_size + i)),
                                [-1, 1], grid_size)
            elif images.shape[1] == 9:
                texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
                mesh_raw = from_UV_2_3D(Image(images[i, 3:6]))
                normals = images[i, 6:9]
                normals_norm = (normals - normals.min()) / (normals.max() - normals.min())
                mesh = lsfm_model.reconstruct(mesh_raw)
                lsfm_params.append(lsfm_model.project(mesh_raw))
                t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points, texture, mesh.trilist)
                m3io.export_textured_mesh(
                    t_mesh,
                    os.path.join(result_subdir, '%06d.obj' % (png_idx * minibatch_size + i)),
                    texture_extension='.png')
                mio.export_image(
                    Image(normals_norm),
                    os.path.join(result_subdir, '%06d_nor.png' % (png_idx * minibatch_size + i)))
                shape = images[i, 3:6]
                shape_norm = (shape - shape.min()) / (shape.max() - shape.min())
                mio.export_image(
                    Image(shape_norm),
                    os.path.join(result_subdir, '%06d_shp.png' % (png_idx * minibatch_size + i)))
                mio.export_pickle(
                    t_mesh,
                    os.path.join(result_subdir, '%06d.pkl' % (png_idx * minibatch_size + i)))
        print('%0.2f seconds' % (time.time() - start))

    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def main():
    test_data_directory = ('./test_data_face/render')
    # load obj
    face_mesh = m3io.import_mesh('./test_data_face/mesh.obj')
    texture_index = (face_mesh.tcoords.points[:, ::-1] * face_mesh.texture.shape).astype(np.int32)
    vertex_color = face_mesh.texture.pixels[:, 1 - texture_index[:, 0], texture_index[:, 1]].T

    tf.reset_default_graph()
    # Set up a basic cube centered at the origin, with vertex normals pointing
    # outwards along the line from the origin to the cube vertices:
    face_vertices = tf.constant(face_mesh.points, dtype=tf.float32)
    face_normals = tf.nn.l2_normalize(face_vertices, dim=1)
    face_triangles = tf.constant(face_mesh.trilist, dtype=tf.int32)

    # testRendersSimpleCube:
    """Renders a simple cube to test the full forward pass.
    Verifies the functionality of both the custom kernel and the python wrapper.
    """
    n_randering = 16
    model_transforms = camera_utils.euler_matrices(
        tf.random_uniform([n_randering, 3]) * np.pi / 2 - np.pi / 4.)[:, :3, :3]
    vertices_world_space = tf.matmul(tf.stack([face_vertices for _ in range(n_randering)]),
                                     model_transforms, transpose_b=True)
    normals_world_space = tf.matmul(tf.stack([face_normals for _ in range(n_randering)]),
                                    model_transforms, transpose_b=True)
    # camera position:
    eye = tf.constant(n_randering * [[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant(n_randering * [[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant(n_randering * [[0.0, 1.0, 0.0]], dtype=tf.float32)
    ambient_colors = tf.constant(n_randering * [[0.2, 0.2, 0.2]], dtype=tf.float32)
    image_width = 256
    image_height = 256
    light_positions = tf.constant(n_randering * [[[6.0, 6.0, 6.0], [-6.0, -6.0, 6.0]]])
    light_intensities = tf.ones([n_randering, 1, 3], dtype=tf.float32)
    vertex_diffuse_colors = tf.constant(np.stack([vertex_color for _ in range(n_randering)]),
                                        dtype=tf.float32)

    rendered = mesh_renderer.mesh_renderer(
        vertices_world_space, triangles=face_triangles, normals=normals_world_space,
        diffuse_colors=vertex_diffuse_colors, camera_position=eye, camera_lookat=center,
        camera_up=world_up, light_positions=light_positions,
        light_intensities=light_intensities, image_width=image_width,
        image_height=image_height, ambient_color=ambient_colors)

    image_id = 0
    with tf.Session() as sess:
        fps_list = []
        while (image_id < 100):
            start_time = time.time()
            images = sess.run(rendered, feed_dict={})
            for image in images:
                target_image_name = 'Gray_face_%i.png' % image_id
                image_id += 1
                baseline_image_path = os.path.join(test_data_directory, target_image_name)
                mio.export_image(Image.init_from_channels_at_back(image[..., :3].clip(0, 1)),
                                 baseline_image_path, overwrite=True)
            end_time = time.time()
            fps = n_randering / (end_time - start_time)
            fps_list.append(fps)
            if len(fps_list) > 5:
                fps_list.pop(0)
            print(np.mean(fps_list))
def read_img(image_path, aug_mode, shear_x, shear_y, output_path, degree=0, align=True,
             crop_around_bb=True, save=True):
    '''
    Read the image at the given path and apply the requested transformation steps.
    :param image_path: Path to the image
    :param aug_mode: Mode for augmentation; can be any of the options below
    :param align: If True, align the image based on the corners of the eyes
    :param crop_around_bb: If True, crop the image around an extension of the ground truth
                           bounding box (adding 40% of the length to the top)
    :return: Pixels of this image and landmarks after applying the desired transformation
    '''
    # Read the image with the Menpo library
    img = mio.import_image(image_path)
    land_path = image_path[:-4].replace('Images', 'AAM_landmarks') + "_aam.txt"
    with open(land_path) as file:
        tmp = np.array([[float(x) for x in line.split()] for line in file])
    # Swapping columns (Y, X) -> (X, Y)
    result = np.zeros((66, 2))
    result[:, 0] = tmp[:, 1]
    result[:, 1] = tmp[:, 0]
    # Adding landmarks
    P = PointCloud(result)
    img.landmarks.__setitem__('face_ibug_66_trimesh', P)
    # Align the images based on eye corners
    if aug_mode == 0 and align:
        Leye_Rcorner = img.landmarks['face_ibug_66_trimesh'].points[39, :]
        Reye_Lcorner = img.landmarks['face_ibug_66_trimesh'].points[42, :]
        dx = Reye_Lcorner[1] - Leye_Rcorner[1]
        dy = Leye_Rcorner[0] - Reye_Lcorner[0]
        theta = np.arctan(dy / (1.0 * dx))
        img = img.rotate_ccw_about_centre(-theta, degrees=False, retain_shape=False)
    elif aug_mode == 1 or aug_mode == 8:
        # Rotate the image with a random degree less than or equal to the given degree
        img = img.rotate_ccw_about_centre(degree, degrees=True, retain_shape=False)
    elif aug_mode == 2 or aug_mode == 9:
        img = img.rotate_ccw_about_centre(-degree, degrees=True, retain_shape=False)
    elif aug_mode == 3:
        img = img.mirror(axis=1, return_transform=False)
    elif aug_mode == 4:
        shear_tr = Affine.init_from_2d_shear(shear_x, shear_y)
        img = img.transform_about_centre(shear_tr)
    elif aug_mode == 5:
        img = img.mirror(axis=1, return_transform=False)
        shear_tr = Affine.init_from_2d_shear(shear_x, shear_y)
        img = img.transform_about_centre(shear_tr)
    elif aug_mode == 6:
        img = img.mirror(axis=1, return_transform=False)
        img = img.rotate_ccw_about_centre(degree, degrees=True, retain_shape=False)
    elif aug_mode == 7:
        img = img.mirror(axis=1, return_transform=False)
        img = img.rotate_ccw_about_centre(-degree, degrees=True, retain_shape=False)
    # Crop the bounding box based on landmarks and add 40% to its top part
    if crop_around_bb:
        bb = img.landmarks['face_ibug_66_trimesh'].bounding_box()
        d_y = 0.4 * (bb.points[1, 0] - bb.points[0, 0])
        bb.points[0, 0] -= d_y
        bb.points[3, 0] -= d_y
        # Crop the image around the bounding box and resize it
        img = img.crop_to_pointcloud_proportion(bb, 0.5)
        img = img.resize(shape=(224, 224))
    if save:
        mio.export_image(img, output_path + 'AU_' + str(aug_mode) + '_' + img.path.name,
                         overwrite=True)
    return img.pixels, img.landmarks['face_ibug_66_trimesh'].points
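# Hedged usage sketch for read_img. The Images/AAM_landmarks folder layout implied by the
# land_path replacement above, and the concrete paths, are assumptions.
pixels, landmarks = read_img('/data/Images/SN001/0001.jpg',
                             aug_mode=0, shear_x=0, shear_y=0,
                             output_path='/data/processed/')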
def plot_image_latex_with_subcaptions(folds, pb, pout, name_im, legend_names=None,
                                      normalise=None, allow_fail=False, overwr=True):
    """
    Customised function for my papers. It plots variations of an image (i.e. different
    images) next to each other with the respective legend names.
    The idea is: import the images one by one from the folds, normalise them (e.g. resize)
    and export each with a predictable name. Write the tex file and compile it to create
    the image with the several subplots and the custom labels.
    Attention: because of the latex compilation, this function writes and reads from the
    disk, so pay attention to the pout path.
    :param folds: (list) Names of the parent folders to search the image in. The assumption
        is that all of those are relative to the pb path.
    :param pb: (str) Base path where the images to be imported exist.
    :param pout: (str) Path to export the result in. The method will write the result in a
        new sub-folder named 'concatenated'.
    :param name_im: (str) Name (stem + suffix) of the image to be imported from folds.
    :param legend_names: (optional, list or None) If provided, it should match the folds in
        length; each one will be provided as a sub-caption to the respective image.
    :param normalise: (optional, list of functions or None) If not None, each function
        accepts a menpo image and normalises it.
    :param allow_fail: (optional, list or bool) If bool, it is converted into a list of
        length len(folds). Images from folds that do not exist will be ignored if
        allow_fail is True.
    :param overwr: (optional, bool) Whether to overwrite the intermediate results written.
    :return:
    # TODO: extend the formulation to provide freedom in the number of elements per line etc.
    """
    # # short lambda for avoiding the long import command.
    import_im = lambda p, norm=False: mio.import_image(p, landmark_resolver=None, normalize=norm)
    # # names_imout: Names of the output images on the disk.
    # # names_meth: Name of the method to put in the sub-caption.
    names_imout, names_meth = [], []
    # # if allow_fail is provided as a single boolean, convert into a list, i.e.
    # # each one of the folders has different permissions.
    if not isinstance(allow_fail, list):
        allow_fail = [allow_fail for _ in range(len(folds))]
    # # if normalise is provided as a single function, convert into a list.
    if not isinstance(normalise, list):
        normalise = [normalise for _ in range(len(folds))]

    for cnt, fold in enumerate(folds):
        if allow_fail[cnt]:
            # # In this case, we don't mind if an image fails.
            try:
                im = import_im(join(pb, fold, name_im))
            except:
                continue
        else:
            im = import_im(join(pb, fold, name_im))
        # # get the name for the sub-caption (legend).
        if legend_names is not None:
            if '_' in legend_names[cnt]:
                print('WARNING: `_` found on legend name, possibly an issue with latex.')
            names_meth.append(legend_names[cnt])
        else:
            assert 0, 'Not implemented for now! Need to use map_to_name()'
        # # Optionally resize the image.
        if normalise[cnt]:
            im = normalise[cnt](im)
        # # export the image to the disk and append the exported name to the list.
        nn = '{}_{}'.format(Path(fold).stem, im.path.name)
        mio.export_image(im, pout + nn, overwrite=overwr)
        names_imout.append(nn)

    # # export the latex command into a file.
    nlat = Path(name_im).stem
    fo = open(pout + '{}.tex'.format(nlat), 'wt')
    fo.writelines(('\\documentclass{article}\\usepackage{amsmath}'
                   '\n\\usepackage{graphicx}\\usepackage{subfig}'
                   '\\begin{document}\n'))
    list_to_latex(names_imout, wrap_subfloat=True, names_subfl=names_meth, pbl='',
                  file_to_print=fo, caption=False)
    fo.writelines('\\thispagestyle{empty}\\end{document}\n')
    fo.close()
    # # the concatenated folder for the final png
    pout1 = Path(mkdir_p(join(pout, 'concatenated', '')))
    # # create the png image and delete the tex and intermediate results.
    cmd = ('cd {0}; pdflatex {1}.tex; pdfcrop {1}.pdf;'
           'rm {1}.aux {1}.log {1}.pdf; mv {1}-crop.pdf {2}.pdf;'
           'pdftoppm -png {2}.pdf > {2}.png; rm {2}.pdf; rm {0}*.png; rm {0}*.tex')
    nconc = pout1.stem + sep + nlat
    return popen(cmd.format(pout, nlat, nconc))
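# Hedged usage sketch for plot_image_latex_with_subcaptions. Folder names, paths and the
# resize normaliser are assumptions; pdflatex, pdfcrop and pdftoppm must be on the PATH,
# since the function shells out to them. Note that pout is concatenated directly with the
# per-image names, so it should end with a path separator.
folds = ['method_a', 'method_b']
plot_image_latex_with_subcaptions(folds, '/results', '/results/compare/',
                                  'img_0001.png',
                                  legend_names=['Method A', 'Method B'],
                                  normalise=[lambda im: im.resize((200, 200))] * len(folds))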
def export_shape_nicp_visualization(r, id_, img):
    mio.export_image(img, path_shape_nicp_visualization(r, id_), overwrite=True)
    os.makedirs(same_as_before_with_cropped_images)
subprocess.call('ffmpeg -loglevel panic -i ' + whole_path_of_video + ' -vf fps=' + str(fps) +
                ' ' + path_where_you_want_the_uncropped_images_to_be_stored + '/%05d.jpg',
                shell=True)
vv = mio.import_images(path_where_you_want_the_uncropped_images_to_be_stored + '/*.jpg')
for cnt, im in enumerate(vv):
    name = '{0:05d}'.format(cnt + 1)
    lns = detector(im)
    if im.landmarks.n_groups == 0:
        # there are no detections
        continue
    if im.landmarks.n_groups == 1:
        im.constrain_landmarks_to_bounds()
        mio.export_image(im.crop_to_landmarks(),
                         cropped_image_location + '/' + name + '.jpg',
                         extension=None, overwrite=True)
    elif im.landmarks.n_groups > 1:
        for i in range(im.landmarks.n_groups):
            im.constrain_landmarks_to_bounds()
            mio.export_image(im.crop_to_landmarks(group='ffld2_' + str(i)),
                             cropped_image_location + '/' + name + '_' + str(i) + '.jpg',
                             extension=None, overwrite=True)
def generate_interpolation_images(run_id, snapshot=None, grid_size=[1, 1], image_shrink=1,
                                  image_zoom=1, duration_sec=60.0, smoothing_sec=1.0,
                                  mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M',
                                  random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + [
        Gs.input_shape[1:][0] + Gs.input_shapes[1][1:][0]
    ]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(
        all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))
    # 10 10 10 10 5 3 10
    # model = mio.import_pickle('../models/lsfm_shape_model_fw.pkl')
    # facesoft_model = mio.import_pickle('../models/facesoft_id_and_exp_3d_face_model.pkl')['shape_model']
    # lsfm_model = m3io.import_lsfm_model('/home/baris/Projects/faceganhd/models/all_all_all.mat')
    # model_mean = lsfm_model.mean().copy()
    # mask = mio.import_pickle('../UV_spaces_V2/mask_full_2_crop.pkl')
    lsfm_tcoords = \
        mio.import_pickle('512_UV_dict.pkl')['tcoords']
    lsfm_params = []
    result_subdir = misc.create_result_subdir(config_test.result_dir, config_test.desc)
    for png_idx in range(int(num_frames / minibatch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' % (png_idx * minibatch_size,
                                                    (png_idx + 1) * minibatch_size, num_frames),
              end='')
        latents = all_latents[png_idx * minibatch_size:(png_idx + 1) * minibatch_size,
                              0, :Gs.input_shape[1:][0]]
        labels = all_latents[png_idx * minibatch_size:(png_idx + 1) * minibatch_size,
                             0, Gs.input_shape[1:][0]:]
        labels_softmax = softmax(labels) * np.array([10, 10, 10, 10, 5, 3, 10])
        images = Gs.run(latents, labels_softmax, minibatch_size=minibatch_size,
                        num_gpus=config_test.num_gpus, out_shrink=image_shrink)
        for i in range(minibatch_size):
            texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
            img_shape = ndimage.gaussian_filter(images[i, 3:6], sigma=(0, 3, 3), order=0)
            mesh_raw = from_UV_2_3D(Image(img_shape), topology='full', uv_layout='oval')
            # model_mean.points[mask, :] = mesh_raw.points
            normals = images[i, 6:9]
            normals_norm = (normals - normals.min()) / (normals.max() - normals.min())
            mesh = mesh_raw  # facesoft_model.reconstruct(model_mean).from_mask(mask)
            # lsfm_params.append(lsfm_model.project(mesh_raw))
            t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points, texture, mesh.trilist)
            m3io.export_textured_mesh(
                t_mesh, os.path.join(result_subdir, '%06d.obj' % (png_idx * minibatch_size + i)),
                texture_extension='.png')
            fix_obj(os.path.join(result_subdir, '%06d.obj' % (png_idx * minibatch_size + i)))
            mio.export_image(
                Image(normals_norm),
                os.path.join(result_subdir, '%06d_nor.png' % (png_idx * minibatch_size + i)))
        print('%0.2f seconds' % (time.time() - start))

    mio.export_pickle(lsfm_params, os.path.join(result_subdir, 'lsfm_params.pkl'))
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def fit(imagepath):
    image = mio.import_image(imagepath, normalize=False)
    if len(image.pixels.shape) == 2:
        image.pixels = np.stack([image.pixels, image.pixels, image.pixels])
    if image.pixels.shape[0] == 1:
        image.pixels = np.concatenate([image.pixels, image.pixels, image.pixels], axis=0)
    print(image.pixels_with_channels_at_back().shape)
    bb = detect(image.pixels_with_channels_at_back())[0]
    initial_shape = aam_fitter.fit_from_bb(image, bb).final_shape
    result = fitter.fit_from_shape(image, initial_shape, max_iters=40,
                                   camera_update=True, focal_length_update=False,
                                   reconstruction_weight=1, shape_prior_weight=.4e8,
                                   texture_prior_weight=1., landmarks_prior_weight=1e5,
                                   return_costs=True, init_shape_params_from_lms=False)

    mesh = ColouredTriMesh(result.final_mesh.points, result.final_mesh.trilist)

    def transform(mesh):
        return result._affine_transforms[-1].apply(result.camera_transforms[-1].apply(mesh))

    mesh_in_img = transform(lambertian_shading(mesh))
    expr_dir = image.path.parent
    p = image.path.stem
    raster = rasterize_mesh(mesh_in_img, image.shape)

    uv_shape = (600, 1000)
    template = shape_model.mean()
    unwrapped_template = optimal_cylindrical_unwrap(template).apply(template)
    minimum = unwrapped_template.bounds(boundary=0)[0]
    unwrapped_template = Translation(-minimum).apply(unwrapped_template)
    unwrapped_template.points = unwrapped_template.points[:, [1, 0]]
    unwrapped_template.points[:, 0] = (unwrapped_template.points[:, 0].max() -
                                       unwrapped_template.points[:, 0])
    unwrapped_template.points *= np.array([.40, .31])
    unwrapped_template.points *= np.array([uv_shape])

    bcoords_img, tri_index_img = rasterize_barycentric_coordinate_images(unwrapped_template,
                                                                         uv_shape)
    TI = tri_index_img.as_vector()
    BC = bcoords_img.as_vector(keep_channels=True).T

    def masked_texture(mesh_in_image, background):
        sample_points_3d = mesh_in_image.project_barycentric_coordinates(BC, TI)
        texture = bcoords_img.from_vector(background.sample(sample_points_3d.points[:, :2]))
        return texture

    uv = masked_texture(mesh_in_img, image)
    t = TexturedTriMesh(result.final_mesh.points,
                        image_coords_to_tcoords(uv.shape).apply(unwrapped_template).points,
                        uv, mesh_in_img.trilist)
    m3io.export_textured_mesh(t, str(expr_dir / Path(p).with_suffix('.mesh.obj')), overwrite=True)
    mio.export_image(raster, str(expr_dir / Path(p).with_suffix('.render.jpg')), overwrite=True)
fps = 30
subprocess.call('ffmpeg -loglevel panic -i ' + pb + filename + ' -vf fps=' + str(fps) +
                ' ' + puncrop + filename.split('.')[0] + '/%05d.jpg', shell=True)
vv = mio.import_images(puncrop + filename.split('.')[0] + '/*.jpg')  # import the video
for cnt, im in enumerate(vv):
    lns = detector(im)
    name = '{0:05d}'.format(cnt + 1)
    if im.landmarks.n_groups == 0:
        continue
    if im.landmarks.n_groups == 1:
        im.constrain_landmarks_to_bounds()
        mio.export_image(im.crop_to_landmarks(),
                         pcrop + filename.split('.')[0] + '/' + name + '.jpg',
                         extension=None, overwrite=True)
    elif im.landmarks.n_groups > 1:
        keys = im.landmarks.keys()
        keys = sorted(keys)
        for key in keys:
            im.constrain_landmarks_to_bounds()
            mio.export_image(im.crop_to_landmarks(group=key),
                             pcrop + filename.split('.')[0] + '/' + name + '-' + key + '.jpg',
                             extension=None, overwrite=True)
f.close()

mean_image = np.zeros((227, 227), dtype=np.float32)
std_image = np.zeros((227, 227), dtype=np.float32)
db_size = 5 * 10000
print(db_size)

print('Compute mean image...')
for images in h5:
    for image in images:
        # mean_image = mean_image + image.transpose(2, 0, 1) / db_size
        mean_image = mean_image + image / db_size
mio.export_image(
    mi.Image(mean_image),
    "/media/isen/Data_windows/PROJET_M1_DL/Affect-Net/MAN/classes227/mean_image.png",
    overwrite=True)

print('Compute std image...')
for images in h5:
    for image in images:
        std_image = std_image + np.power((image - mean_image), 2) / db_size
std_image = np.sqrt(std_image)
mio.export_image(
    mi.Image(std_image),
    "/media/isen/Data_windows/PROJET_M1_DL/Affect-Net/MAN/classes227/std_image.png",
    overwrite=True)

print('saving in h5 file')
g = h5py.File(
    "/media/isen/Data_windows/PROJET_M1_DL/Affect-Net/MAN/classes227/mean_std.hdf5",
def affine_enhance(path_to_images, save_dir=None, scales=[1], rotations=[0],
                   translations=[[0, 0]], mean_shape=1):
    if save_dir is not None:
        mk_dir(save_dir, 0)
    # load training images
    train_images = []
    for path_to_image in path_to_images:
        for img in print_progress(mio.import_images(path_to_image, verbose=True)):
            train_images.append(img)
    print 'sum of training data: %d' % len(train_images)
    # create pca model based on training set
    # shape_model = pca(path_train_images)
    shape_model = pca_image(train_images)
    excepted_num = len(scales) * len(rotations) * len(translations) * len(train_images)
    completed_num = 0
    for train_img in train_images:
        if mean_shape:
            transform = AlignmentAffine(train_img.landmarks['PTS'], shape_model.model.mean())
            [r1, s, r2, t] = transform.decompose()
            # transform = r2.compose_after(s.compose_after(r1))
            transform = r2.compose_after(r1)
            rotation_shape = transform.apply(train_img.landmarks['PTS'])
            offset = train_img.landmarks['PTS'].centre() - rotation_shape.centre()
            t = compositions.Translation(offset, train_img.n_dims)
            transform = t.compose_after(r2.compose_after(r1))
            normal_image = train_img.warp_to_shape(train_img.shape, transform.pseudoinverse(),
                                                   warp_landmarks=True, order=1, mode='nearest',
                                                   return_transform=False)
        else:
            normal_image = train_img
        for scale in scales:
            for rotation in rotations:
                for translation in translations:
                    s = compositions.scale_about_centre(normal_image.landmarks['PTS'], scale)
                    r = compositions.rotate_ccw_about_centre(normal_image, rotation)
                    t = compositions.Translation(translation, normal_image.n_dims)
                    transform = t.compose_after(s.compose_after(r))
                    # warp image
                    new_image = normal_image.warp_to_shape(normal_image.shape,
                                                           transform.pseudoinverse(),
                                                           warp_landmarks=True, order=1,
                                                           mode='nearest',
                                                           return_transform=False)
                    # plt.subplot(121)
                    # normal_image.view_landmarks(marker_face_colour='white', marker_edge_colour='black',
                    #                             marker_size=4, render_axes=True)
                    # plt.gca().set_title('Original image')
                    # plt.subplot(122)
                    # new_image.view_landmarks(marker_face_colour='white', marker_edge_colour='black',
                    #                          marker_size=4, render_axes=True)
                    # plt.gca().set_title('Rescale image')
                    # plt.close('all')
                    # save enhanced image with label
                    img_suffix = new_image.path.suffix
                    lb_suffix = '.pts'
                    dataType = filter(lambda x: x in str(new_image.path), support_types)[0]
                    new_image_name = '%s_' % dataType + new_image.path.name.split('.')[0] + \
                        '_s%s_r%s_x%s_y%s' % (str(scale), str(rotation),
                                              str(translation[0]), str(translation[1]))
                    img_path = os.path.join(save_dir, new_image_name + img_suffix)
                    lb_path = os.path.join(save_dir, new_image_name + lb_suffix)
                    mio.export_image(new_image, img_path, overwrite=True)
                    mio.export_landmark_file(new_image.landmarks['PTS'], lb_path, overwrite=True)
                    # plt.subplot(121)
                    # new_image.view_landmarks(marker_face_colour='white', marker_edge_colour='black',
                    #                          marker_size=4, render_axes=True)
                    # plt.gca().set_title('new image')
                    # save_image = mio.import_image(img_path)
                    # plt.subplot(122)
                    # save_image.view_landmarks(marker_face_colour='white', marker_edge_colour='black',
                    #                           marker_size=4, render_axes=True)
                    # plt.gca().set_title('saved image')
                    # plt.close('all')
                    completed_num = completed_num + 1
                    print 'completed: %d/%d' % (completed_num, excepted_num)