def __init__(self, face_cropper, gamma=0.2, sigma0=1, sigma1=2, size=5, threshold=10., alpha=0.1, **kwargs):
    Base.__init__(self, **kwargs)   # call base class constructor with its set of parameters
    Preprocessor.__init__(
        self,
        face_cropper=face_cropper,
        gamma=gamma,
        sigma0=sigma0,
        sigma1=sigma1,
        size=size,
        threshold=threshold,
        alpha=alpha)

    self.cropper = load_cropper(face_cropper)
    self.tan_triggs = bob.ip.base.TanTriggs(gamma, sigma0, sigma1, size, threshold, alpha)
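# Minimal usage sketch for the Tan-Triggs preprocessor constructor above; the
# wrapper class name ``TanTriggs`` and the registered cropper resource name
# are assumptions, while the keyword arguments and defaults come from the
# constructor itself.
preprocessor = TanTriggs(
    face_cropper='face-crop-eyes',   # hypothetical registered resource name
    gamma=0.2, sigma0=1, sigma1=2, size=5, threshold=10., alpha=0.1)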
def __init__(
        self,
        max_iterations=10,              # maximum number of EM iterations
        convergence_threshold=0.0005,
        variance_threshold=0.0005,
        win_length_ms=20.,              # 20 ms
        win_shift_ms=10.,               # 10 ms
        smoothing_window=10,            # 10 frames (i.e. 100 ms)
        **kwargs):
    # call base class constructor with its set of parameters
    Preprocessor.__init__(
        self,
        max_iterations=max_iterations,
        convergence_threshold=convergence_threshold,
        variance_threshold=variance_threshold,
        win_length_ms=win_length_ms,
        win_shift_ms=win_shift_ms,
        smoothing_window=smoothing_window,
    )

    # copy parameters
    self.max_iterations = max_iterations
    self.convergence_threshold = convergence_threshold
    self.variance_threshold = variance_threshold
    self.win_length_ms = win_length_ms
    self.win_shift_ms = win_shift_ms
    self.smoothing_window = smoothing_window
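# Hedged sketch relating the framing parameters of the constructor above; the
# class name ``EnergyVAD`` is hypothetical, everything else follows from the
# defaults and inline comments: one frame is produced every
# ``win_shift_ms`` = 10 ms, so a 10-frame smoothing window spans roughly 100 ms.
vad = EnergyVAD(win_length_ms=20., win_shift_ms=10., smoothing_window=10)
smoothing_span_ms = vad.smoothing_window * vad.win_shift_ms   # 100.0 ms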
def __init__(self, dtype=None, color_channel='gray', **kwargs):
    Preprocessor.__init__(self, dtype=str(dtype), color_channel=color_channel, **kwargs)
    self.channel = color_channel
    self.dtype = dtype
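# Minimal usage sketch, assuming the snippet above is the ``Base`` image
# preprocessor whose constructor the other snippets call; the class name and
# import path are assumptions.  Note that ``dtype`` is only stringified for
# the bookkeeping call to ``Preprocessor.__init__`` and is stored unchanged
# on the instance.
import numpy
base = Base(dtype=numpy.float64, color_channel='gray')
assert base.dtype is numpy.float64 and base.channel == 'gray'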
def __init__(
        self,
        face_cropper,
        radius=2,                   # radius of the LBP
        is_circular=True,           # use circular LBP?
        compare_to_average=False,
        elbp_type='regular',
        **kwargs):
    """Parameters of the constructor of this preprocessor:

    face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
      The face image cropper that should be applied to the image.
      It might be specified as a registered resource, a configuration file, or an instance of a preprocessor.

      .. note:: The given class needs to contain a ``crop_face`` method.

    radius : int
      The radius of the LBP features to extract.

    is_circular : bool
      Whether to extract circular LBP features, or square features.

    compare_to_average : bool
      Compare to the average value of all pixels, or to the central one.

    elbp_type : str
      The way LBP features are extracted; see :py:class:`bob.ip.base.LBP` for more details.

    kwargs
      Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
    """
    # call base class constructors
    Base.__init__(self, **kwargs)
    Preprocessor.__init__(
        self,
        face_cropper=face_cropper,
        radius=radius,
        is_circular=is_circular,
        compare_to_average=compare_to_average,
        elbp_type=elbp_type)

    # LBP extraction
    self.lbp_extractor = bob.ip.base.LBP(
        neighbors=8,
        radius=radius,
        circular=is_circular,
        to_average=compare_to_average,
        add_average_bit=False,
        uniform=False,
        elbp_type=elbp_type,
        border_handling='wrap')

    self.cropper = load_cropper(face_cropper)
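# Hedged usage sketch for the LBP-based preprocessor above.  The wrapper class
# name ``INormLBP`` is an assumption; that ``face_cropper`` may be a
# ``FaceCrop``/``FaceDetect`` instance, a registered resource string, or
# ``None`` is taken from the docstring.  The image size and eye positions are
# illustrative only.
cropper = FaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions={'reye': (16, 15), 'leye': (16, 48)})
lbp_preprocessor = INormLBP(face_cropper=cropper, radius=2, is_circular=True)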
def __init__(self, face_cropper, sigma=math.sqrt(2.), **kwargs):
    Base.__init__(self, **kwargs)   # call base class constructor with its set of parameters
    Preprocessor.__init__(self, face_cropper=face_cropper, sigma=sigma)

    self.cropper = load_cropper(face_cropper)
    size = max(1, int(3. * sigma))
    self.self_quotient = bob.ip.base.SelfQuotientImage(size_min=size, sigma=sigma)
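# Worked example of the kernel-size rule used above: with the default
# sigma = sqrt(2) ≈ 1.414, 3 * sigma ≈ 4.24, so size = max(1, int(4.24)) = 4
# is what gets passed as ``size_min`` to ``bob.ip.base.SelfQuotientImage``.
import math
size = max(1, int(3. * math.sqrt(2.)))
assert size == 4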
def __init__(
        self,
        cropped_image_size,
        cropped_positions,
        fixed_positions=None,
        mask_sigma=None,
        mask_neighbors=5,
        mask_seed=None,
        annotator=None,
        allow_upside_down_normalized_faces=False,
        **kwargs):
    Base.__init__(self, **kwargs)   # call base class constructor
    Preprocessor.__init__(
        self,
        cropped_image_size=cropped_image_size,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        mask_sigma=mask_sigma,
        mask_neighbors=mask_neighbors,
        mask_seed=mask_seed)

    # check parameters
    assert len(cropped_positions) == 2
    if fixed_positions:
        assert len(fixed_positions) == 2

    # copy parameters
    self.cropped_image_size = cropped_image_size
    self.cropped_positions = cropped_positions
    self.cropped_keys = sorted(cropped_positions.keys())
    self.fixed_positions = fixed_positions
    self.mask_sigma = mask_sigma
    self.mask_neighbors = mask_neighbors
    self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937()
    self.annotator = annotator
    self.allow_upside_down_normalized_faces = allow_upside_down_normalized_faces

    # create objects required for face cropping
    self.cropper = bob.ip.base.FaceEyesNorm(
        crop_size=cropped_image_size,
        right_eye=cropped_positions[self.cropped_keys[0]],
        left_eye=cropped_positions[self.cropped_keys[1]])
    self.cropped_mask = numpy.ndarray(cropped_image_size, bool)   # numpy.bool is a removed alias of bool
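# Hedged usage sketch for the eye-based cropper above.  The class name
# ``FaceCrop`` is taken from the docstrings of the LBP snippets; the concrete
# image size and eye coordinates are illustrative.  ``cropped_positions``
# follows the documented convention
# {'reye': (RIGHT_EYE_Y, RIGHT_EYE_X), 'leye': (LEFT_EYE_Y, LEFT_EYE_X)}.
cropper = FaceCrop(
    cropped_image_size=(80, 64),
    cropped_positions={'reye': (16, 15), 'leye': (16, 48)},
    fixed_positions=None)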
def __init__(self, face_cropper, **kwargs):
    Base.__init__(self, **kwargs)   # call base class constructor with its set of parameters
    Preprocessor.__init__(self, face_cropper=face_cropper)

    self.cropper = load_cropper(face_cropper)
def __init__(
        self,
        split_training_data_by_client=False,
        features_mask=numpy.zeros(90),   # mask of which features to read
        **kwargs):
    # call base class constructor with its set of parameters
    Preprocessor.__init__(self, read_original_data=self.read_matlab_files, **kwargs)
    Extractor.__init__(
        self,
        requires_training=False,
        split_training_data_by_client=split_training_data_by_client,
        **kwargs)

    self.features_mask = features_mask
def __init__(self, cropped_image_size, cropped_positions, fixed_positions=None,
             mask_sigma=None, mask_neighbors=5, mask_seed=None, annotator=None,
             **kwargs):
    Base.__init__(self, **kwargs)   # call base class constructor
    Preprocessor.__init__(
        self,
        cropped_image_size=cropped_image_size,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        mask_sigma=mask_sigma,
        mask_neighbors=mask_neighbors,
        mask_seed=mask_seed)

    # check parameters
    assert len(cropped_positions) == 2
    if fixed_positions:
        assert len(fixed_positions) == 2

    # copy parameters
    self.cropped_image_size = cropped_image_size
    self.cropped_positions = cropped_positions
    self.cropped_keys = sorted(cropped_positions.keys())
    self.fixed_positions = fixed_positions
    self.mask_sigma = mask_sigma
    self.mask_neighbors = mask_neighbors
    self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937()
    self.annotator = annotator

    # create objects required for face cropping
    self.cropper = bob.ip.base.FaceEyesNorm(
        crop_size=cropped_image_size,
        right_eye=cropped_positions[self.cropped_keys[0]],
        left_eye=cropped_positions[self.cropped_keys[1]])
    self.cropped_mask = numpy.ndarray(cropped_image_size, bool)   # numpy.bool is a removed alias of bool
def __init__(
        self,
        cropped_image_size,     # resolution of the cropped image, in order (HEIGHT, WIDTH); if not given, no face cropping will be performed
        cropped_positions,      # dictionary of the cropped positions, usually: {'reye': (RIGHT_EYE_Y, RIGHT_EYE_X), 'leye': (LEFT_EYE_Y, LEFT_EYE_X)}
        fixed_positions=None,   # dictionary of FIXED positions in the original image; if specified, annotations from the database will be ignored
        mask_sigma=None,        # the sigma for random values in areas outside the image
        mask_neighbors=5,       # the number of neighbors to consider while extrapolating
        mask_seed=None,         # the seed for generating random values during extrapolation
        **kwargs                # parameters to be written in the __str__ method
):
    Base.__init__(self, **kwargs)   # call base class constructor
    Preprocessor.__init__(
        self,
        cropped_image_size=cropped_image_size,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        mask_sigma=mask_sigma,
        mask_neighbors=mask_neighbors,
        mask_seed=mask_seed)

    # check parameters
    assert len(cropped_positions) == 2
    if fixed_positions:
        assert len(fixed_positions) == 2

    # copy parameters
    self.cropped_image_size = cropped_image_size
    self.cropped_positions = cropped_positions
    self.cropped_keys = sorted(cropped_positions.keys())
    self.fixed_positions = fixed_positions
    self.mask_sigma = mask_sigma
    self.mask_neighbors = mask_neighbors
    self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937()

    # create objects required for face cropping
    self.cropper = bob.ip.base.FaceEyesNorm(
        crop_size=cropped_image_size,
        right_eye=cropped_positions[self.cropped_keys[0]],
        left_eye=cropped_positions[self.cropped_keys[1]])
    self.cropped_mask = numpy.ndarray(cropped_image_size, bool)   # numpy.bool is a removed alias of bool
def __init__(
        self,
        max_iterations=10,              # maximum number of EM iterations
        convergence_threshold=0.0005,
        variance_threshold=0.0005,
        win_length_ms=20.,              # 20 ms
        win_shift_ms=10.,               # 10 ms
        smoothing_window=10,            # 10 frames (i.e. 100 ms)
        n_filters=40,
        f_min=0.0,                      # 0 Hz
        f_max=4000,                     # 4 kHz
        pre_emphasis_coef=1.0,
        ratio_threshold=0.1,            # 0.1 of the maximum energy
        **kwargs):
    # call base class constructor with its set of parameters
    Preprocessor.__init__(
        self,
        max_iterations=max_iterations,
        convergence_threshold=convergence_threshold,
        variance_threshold=variance_threshold,
        win_length_ms=win_length_ms,
        win_shift_ms=win_shift_ms,
        smoothing_window=smoothing_window,
        n_filters=n_filters,
        f_min=f_min,
        f_max=f_max,
        pre_emphasis_coef=pre_emphasis_coef,
        ratio_threshold=ratio_threshold,
    )

    # copy parameters
    self.max_iterations = max_iterations
    self.convergence_threshold = convergence_threshold
    self.variance_threshold = variance_threshold
    self.win_length_ms = win_length_ms
    self.win_shift_ms = win_shift_ms
    self.smoothing_window = smoothing_window
    self.n_filters = n_filters
    self.f_min = f_min
    self.f_max = f_max
    self.pre_emphasis_coef = pre_emphasis_coef
    self.ratio_threshold = ratio_threshold
def __init__(
        self,
        win_length_ms=20.,       # 20 ms
        win_shift_ms=10.,        # 10 ms
        smoothing_window=10,     # 10 frames (i.e. 100 ms)
        ratio_threshold=0.15,    # 0.15 of the maximum energy
        **kwargs):
    # call base class constructor with its set of parameters
    Preprocessor.__init__(
        self,
        win_length_ms=win_length_ms,
        win_shift_ms=win_shift_ms,
        smoothing_window=smoothing_window,
        ratio_threshold=ratio_threshold,
    )

    # copy parameters
    self.win_length_ms = win_length_ms
    self.win_shift_ms = win_shift_ms
    self.smoothing_window = smoothing_window
    self.ratio_threshold = ratio_threshold
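# Hedged usage sketch for the energy-threshold VAD constructor above; the
# class name ``EnergyThreshold`` is hypothetical.  Per the inline comments,
# ``ratio_threshold`` is interpreted relative to the maximum frame energy.
vad = EnergyThreshold(win_length_ms=20., win_shift_ms=10.,
                      smoothing_window=10, ratio_threshold=0.15)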
def __init__(
        self,
        face_cropper,
        cascade=None,
        use_flandmark=False,
        detection_overlap=0.2,
        distance=2,
        scale_base=math.pow(2., -1. / 16.),
        lowest_scale=0.125,
        **kwargs):
    # call base class constructors
    Base.__init__(self, **kwargs)
    Preprocessor.__init__(
        self,
        face_cropper=face_cropper,
        cascade=cascade,
        use_flandmark=use_flandmark,
        detection_overlap=detection_overlap,
        distance=distance,
        scale_base=scale_base,
        lowest_scale=lowest_scale)

    assert face_cropper is not None

    self.sampler = bob.ip.facedetect.Sampler(scale_factor=scale_base, lowest_scale=lowest_scale, distance=distance)
    if cascade is None:
        self.cascade = bob.ip.facedetect.default_cascade()
    else:
        self.cascade = bob.ip.facedetect.Cascade(bob.io.base.HDF5File(cascade))
    self.detection_overlap = detection_overlap
    self.flandmark = bob.ip.flandmark.Flandmark() if use_flandmark else None
    self.quality = None

    self.cropper = load_cropper_only(face_cropper)
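# Hedged usage sketch for the detector-based preprocessor above, assuming it
# is the ``FaceDetect`` class referenced in the docstrings of the LBP
# snippets; the cropper configuration is illustrative.  With the default
# scale_base = 2 ** (-1/16), consecutive detection scales shrink by about 4 %.
detector = FaceDetect(
    face_cropper=FaceCrop(
        cropped_image_size=(80, 64),
        cropped_positions={'reye': (16, 15), 'leye': (16, 48)}),
    use_flandmark=False)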
def __init__(self, **kwargs):
    Preprocessor.__init__(self)
def __init__(self, return_none=False, **kwargs):
    Preprocessor.__init__(self)
    self.return_none = return_none
def __init__(self, return_none=False, probability_of_none=1, **kwargs):
    Preprocessor.__init__(self)
    self.return_none = return_none
    self.probability_of_none = probability_of_none
def __init__(self, dtype=None, color_channel='gray'):
    Preprocessor.__init__(self, dtype=str(dtype), color_channel=color_channel)
    self.channel = color_channel
    self.dtype = dtype
def __init__(self,
             win_length_ms=20.0,   # 20 ms
             win_shift_ms=10.0,    # 10 ms
             **kwargs):
    # call base class constructor with its set of parameters
    Preprocessor.__init__(self, win_length_ms=win_length_ms, win_shift_ms=win_shift_ms)

    # copy parameters
    self.win_length_ms = win_length_ms
    self.win_shift_ms = win_shift_ms
def __init__(self, **kwargs):
    # Each class needs to have a constructor taking all the parameters
    # that are required for the preprocessing as arguments.
    Preprocessor.__init__(self, **kwargs)
    self._kwargs = kwargs
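# Hedged sketch of how a concrete preprocessor might build on the generic base
# constructor above.  The subclass name, the scaling behaviour, and the
# ``__call__(data, annotations)`` signature are illustrative assumptions.
class ScalePreprocessor(Preprocessor):

    def __init__(self, factor=2.0, **kwargs):
        # forward the parameter so it is recorded like the other kwargs above
        Preprocessor.__init__(self, factor=factor, **kwargs)
        self.factor = factor

    def __call__(self, data, annotations=None):
        # hypothetical processing step: plain rescaling of the input array
        return data * self.factor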