def test_lazylist_repeat():
    """repeat(2) doubles the length and interleaves each element twice."""
    original = LazyList.init_from_iterable([0, 1])
    repeated = original.repeat(2)
    assert len(repeated) == 4
    for expected, actual in zip([0, 0, 1, 1], repeated):
        assert expected == actual
def test_lazylist_init_from_iterable_with_f():
    """init_from_iterable applies the supplied f to each element on access."""
    def double(value):
        return value * 2

    lazy = LazyList.init_from_iterable([0, 1], f=double)
    assert lazy[0] == 0
    assert lazy[1] == 2
def test_lazylist_slice_with_ndarray():
    """Indexing a LazyList with an integer ndarray selects and reorders."""
    lazy = LazyList.init_from_iterable(["a", "b", "c", "d", "e"])
    selector = np.array([1, 0, 3], dtype=int)
    selected = lazy[selector]
    assert list(selected) == ["b", "a", "d"]
def test_lazylist_init_from_iterable_identity():
    """Without f, init_from_iterable yields the source elements unchanged."""
    lazy = LazyList.init_from_iterable([0, 1])
    assert [lazy[0], lazy[1]] == [0, 1]
def test_lazylist_slice_with_ndarray():
    """Indexing a LazyList with an integer ndarray selects and reorders.

    NOTE(review): this duplicates the name of an earlier test in this file,
    so under pytest collection the second definition shadows the first —
    consider renaming or deleting one of them.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and what the sibling test uses.
    index = np.array([1, 0, 3], dtype=int)
    l = LazyList.init_from_iterable(['a', 'b', 'c', 'd', 'e'])
    l_indexed = l[index]
    assert list(l_indexed) == ['b', 'a', 'd']
def kf_sequence_image_paths_with_landmarks(i, expression):
    """Return a LazyList of .png paths that have a sibling .pts landmark file,
    sorted by path."""
    sequence_dir = kf_sequence_path(i, expression)
    landmark_files = sequence_dir.glob('*.pts')
    image_paths = sorted(pts.with_suffix('.png') for pts in landmark_files)
    return LazyList.init_from_iterable(image_paths)
def kf_sequence_image_paths(i, expression):
    """Return a LazyList over every image path found in the sequence dir."""
    sequence_dir = kf_sequence_path(i, expression)
    return LazyList.init_from_iterable(mio.image_paths(sequence_dir))
def extract_save_features(self, files):
    r"""Incrementally train an AAM over the given videos and pickle it.

    The first file seeds an initial (Holistic or Patch) AAM; each
    subsequent file's filtered frames are accumulated into a buffer that
    is folded into the model via ``aam.increment`` once it exceeds 256
    frames. The final model is exported to
    ``self._outDir + self._outModelName``.

    Parameters
    ----------
    files : sequence of str
        Video file paths; ``files[0]`` is used for the initial training.

    Raises
    ------
    Exception
        If ``self._warpType`` is neither ``'holistic'`` nor ``'patch'``.
    """
    # 1. fetch the first video's frames, attach landmarks, filter, and
    # train the initial model
    frames = self._load_filtered_frames(files[0])
    aam = self._train_initial_aam(frames)

    frame_buffer = LazyList.init_from_iterable([])
    buffer_len = 256
    for idx, file in enumerate(files[1:]):
        # Progress marker: the log is rewritten ('w') each iteration, so
        # it only ever holds the most recently started (index, file) pair.
        with open('./run/log_' + self._outModelName + '.txt', 'w') as log:
            log.write(str(idx) + ' ' + file + '\n')

        frame_buffer += self._load_filtered_frames(file)

        if len(frame_buffer) > buffer_len:
            # 2. fold the buffered frames into the model, then reset the
            # buffer (del before rebinding to release the frames promptly)
            aam.increment(frame_buffer,
                          group=self._landmarkGroup,
                          shape_forgetting_factor=1.0,
                          appearance_forgetting_factor=1.0,
                          verbose=False,
                          batch_size=None)
            del frame_buffer
            frame_buffer = LazyList.init_from_iterable([])

    if len(frame_buffer) != 0:
        # deplete any frames remaining in the buffer
        aam.increment(frame_buffer,
                      group=self._landmarkGroup,
                      shape_forgetting_factor=1.0,
                      appearance_forgetting_factor=1.0,
                      verbose=False,
                      batch_size=None)
        del frame_buffer

    mio.export_pickle(obj=aam,
                      fp=self._outDir + self._outModelName,
                      overwrite=True,
                      protocol=4)

def _load_filtered_frames(self, file):
    """Import one video and return its landmark-filtered frames.

    Keeps only frames whose landmark confidence is above
    ``self._confidence_thresh`` and whose lip opening passes the filter,
    attaches semantic landmarks, and optionally converts to greyscale.
    """
    frames = mio.import_video(file,
                              landmark_resolver=self._myresolver,
                              normalize=True,
                              exact_frame_count=True)
    idx_above_thresh, idx_lip_opening = landmark_filter(
        file,
        self._landmarkDir,
        threshold=self._confidence_thresh,
        keep=self._kept_frames)
    frames = frames[idx_above_thresh]
    frames = frames[idx_lip_opening]
    frames = frames.map(attach_semantic_landmarks)
    if self._greyscale:
        frames = frames.map(convert_to_grayscale)
    return frames

def _train_initial_aam(self, frames):
    """Build the initial AAM from the first video's frames.

    Dispatches on ``self._warpType``: 'holistic' -> HolisticAAM,
    'patch' -> PatchAAM; anything else raises.
    """
    if self._warpType == 'holistic':
        return HolisticAAM(
            frames,
            group=self._landmarkGroup,
            holistic_features=self._features,
            reference_shape=None,
            diagonal=self._diagonal,
            scales=self._scales,
            max_shape_components=self._max_shape_components,
            max_appearance_components=self._max_appearance_components,
            verbose=False)
    elif self._warpType == 'patch':
        return PatchAAM(
            frames,
            group=self._landmarkGroup,
            holistic_features=self._features,
            diagonal=self._diagonal,
            scales=self._scales,
            max_shape_components=self._max_shape_components,
            max_appearance_components=self._max_appearance_components,
            patch_shape=self._extractOpts['patch_shape'],
            verbose=False)
    else:
        # Original message was a single string broken by extraction; a
        # single-quoted literal cannot legally span a newline, so it is
        # reconstructed here on one line.
        raise Exception('Unknown warp type. Did you mean holistic/patch ?')
def shape_nicps(r):
    """Lazily load one shape NICP per discovered path under r."""
    nicp_paths = paths_shape_nicp(r)
    return LazyList.init_from_iterable(nicp_paths,
                                       f=_load_shape_nicp_for_path)