Example #1
  def __init__(
      self,
      block_size = 12,    # one or two parameters for block size
      block_overlap = 11, # one or two parameters for block overlap
      number_of_dct_coefficients = 45,
      normalize_blocks = True,
      normalize_dcts = True,
      auto_reduce_coefficients = False
  ):

    # call base class constructor
    Extractor.__init__(
        self,
        block_size = block_size,
        block_overlap = block_overlap,
        number_of_dct_coefficients = number_of_dct_coefficients,
        normalize_blocks = normalize_blocks,
        normalize_dcts = normalize_dcts,
        auto_reduce_coefficients = auto_reduce_coefficients
    )

    # block parameters
    block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size)
    block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap)

    if block_size[0] < block_overlap[0] or block_size[1] < block_overlap[1]:
      raise ValueError("The overlap '%s' is bigger than the block size '%s'. This won't work. Please check your setup!"%(block_overlap, block_size))
    if block_size[0] * block_size[1] <= number_of_dct_coefficients:
      if auto_reduce_coefficients:
        number_of_dct_coefficients = block_size[0] * block_size[1] - 1
      else:
        raise ValueError("You selected more coefficients %d than your blocks have %d. This won't work. Please check your setup!"%(number_of_dct_coefficients, block_size[0] * block_size[1]))

    self.dct_features = bob.ip.base.DCTFeatures(number_of_dct_coefficients, block_size, block_overlap, normalize_blocks, normalize_dcts)
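
The two sanity checks above guard the only constraints these parameters have: the overlap may not exceed the block size in either direction (in practice it must be strictly smaller, or the sliding grid never advances), and a block of block_size[0] * block_size[1] pixels cannot yield more DCT coefficients than it has pixels. A small stand-alone sketch of that arithmetic (plain Python, not tied to the bob.ip.base API):

def block_grid(image_shape, block_size=(12, 12), block_overlap=(11, 11)):
    # step between block origins; must be positive, i.e. overlap < block size
    step_y = block_size[0] - block_overlap[0]
    step_x = block_size[1] - block_overlap[1]
    assert step_y > 0 and step_x > 0, "overlap must be smaller than the block size"
    blocks_y = (image_shape[0] - block_overlap[0]) // step_y
    blocks_x = (image_shape[1] - block_overlap[1]) // step_x
    max_coefficients = block_size[0] * block_size[1]   # upper bound enforced in __init__
    return blocks_y * blocks_x, max_coefficients

print(block_grid((64, 80)))   # -> (3657, 144) for the default 12x12 blocks with 11px overlap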
Example #2
 def __init__(
     self,
     win_length_ms = 20,
     win_shift_ms = 10,
     n_filters = 24,
     dct_norm = False,
     f_min = 0.0,
     f_max = 4000,
     delta_win = 2,
     mel_scale = True,
     with_energy = True,
     with_delta = True,
     with_delta_delta = True,
     n_ceps = 19, # 0-->18
     pre_emphasis_coef = 0.95,
     features_mask = numpy.arange(0,60),
     # Normalization
     normalize_flag = True,
     **kwargs
 ):
    # call base class constructor with its set of parameters
   Extractor.__init__(
       self,
       win_length_ms = win_length_ms,
       win_shift_ms = win_shift_ms,
       n_filters = n_filters,
       dct_norm = dct_norm,
       f_min = f_min,
       f_max = f_max,
       delta_win = delta_win,
       mel_scale = mel_scale,
       with_energy = with_energy,
       with_delta = with_delta,
       with_delta_delta = with_delta_delta,
       n_ceps = n_ceps,
       pre_emphasis_coef = pre_emphasis_coef,
       features_mask = features_mask,
       normalize_flag = normalize_flag,
   )
   # copy parameters
   self.win_length_ms = win_length_ms
   self.win_shift_ms = win_shift_ms
   self.n_filters = n_filters
   self.dct_norm = dct_norm
   self.f_min = f_min
   self.f_max = f_max
   self.delta_win = delta_win
   self.mel_scale = mel_scale
   self.with_energy = with_energy
   self.with_delta = with_delta
   self.with_delta_delta = with_delta_delta
   self.n_ceps = n_ceps
   self.pre_emphasis_coef = pre_emphasis_coef
   self.features_mask = features_mask
   self.normalize_flag = normalize_flag
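
With these defaults (n_ceps = 19, with_energy = True, with_delta = True, with_delta_delta = True) each frame holds 20 static values (19 cepstra plus the energy term) followed by delta and delta-delta blocks of the same width, which lines up with features_mask = numpy.arange(0, 60) selecting all 60 columns. A quick check of that arithmetic:

n_ceps = 19
static = n_ceps + 1        # 19 cepstral coefficients + 1 energy term
per_frame = static * 3     # static block + delta block + delta-delta block
assert per_frame == 60     # matches features_mask = numpy.arange(0, 60)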
Example #3
 def __init__(
         self,
         win_length_ms=20,
         win_shift_ms=10,
         n_filters=24,
         dct_norm=False,
         f_min=0.0,
         f_max=4000,
         delta_win=2,
         mel_scale=True,
         with_energy=True,
         with_delta=True,
         with_delta_delta=True,
         n_ceps=19,  # 0-->18
         pre_emphasis_coef=0.95,
         features_mask=numpy.arange(0, 60),
         # Normalization
         normalize_flag=True,
         **kwargs):
     # call base class constructor with its set of parameters
     Extractor.__init__(
         self,
         win_length_ms=win_length_ms,
         win_shift_ms=win_shift_ms,
         n_filters=n_filters,
         dct_norm=dct_norm,
         f_min=f_min,
         f_max=f_max,
         delta_win=delta_win,
         mel_scale=mel_scale,
         with_energy=with_energy,
         with_delta=with_delta,
         with_delta_delta=with_delta_delta,
         n_ceps=n_ceps,
         pre_emphasis_coef=pre_emphasis_coef,
         features_mask=features_mask,
         normalize_flag=normalize_flag,
     )
     # copy parameters
     self.win_length_ms = win_length_ms
     self.win_shift_ms = win_shift_ms
     self.n_filters = n_filters
     self.dct_norm = dct_norm
     self.f_min = f_min
     self.f_max = f_max
     self.delta_win = delta_win
     self.mel_scale = mel_scale
     self.with_energy = with_energy
     self.with_delta = with_delta
     self.with_delta_delta = with_delta_delta
     self.n_ceps = n_ceps
     self.pre_emphasis_coef = pre_emphasis_coef
     self.features_mask = features_mask
     self.normalize_flag = normalize_flag
Example #4
 def __init__(self,
              features_mask=numpy.arange(0, 60),
              normalize_flag=True,
              **kwargs):
     # call base class constructor with its set of parameters
     Extractor.__init__(
         self,
         features_mask=features_mask,
         normalize_flag=normalize_flag,
     )
     # copy parameters
     self.features_mask = features_mask
     self.normalize_flag = normalize_flag
Example #5
 def __init__(
     self,
     features_mask = numpy.arange(0,60),
     normalize_flag = True,
     **kwargs
 ):
    # call base class constructor with its set of parameters
   Extractor.__init__(
       self,
       features_mask = features_mask,
       normalize_flag = normalize_flag,
   )
   # copy parameters
   self.features_mask = features_mask
   self.normalize_flag = normalize_flag
Example #6
 def __init__(
         self,
         split_training_data_by_client=False,
         features_mask=numpy.zeros(90),  # mask of which features to read
         **kwargs):
     # call base class constructor with its set of parameters
     Preprocessor.__init__(self,
                           read_original_data=self.read_matlab_files,
                           **kwargs)
     Extractor.__init__(
         self,
         requires_training=False,
         split_training_data_by_client=split_training_data_by_client,
         **kwargs)
     self.features_mask = features_mask
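
Example #6 calls both base constructors, which only works if the class derives from both Preprocessor and Extractor and provides the read_matlab_files callback it registers. A minimal outline of that pattern (class name and callback signature are assumptions, not taken from the snippet):

class MatFileExtractor(Preprocessor, Extractor):   # hypothetical class name
    """Sketch of the dual-inheritance pattern implied by Example #6."""

    def __init__(self, **kwargs):
        Preprocessor.__init__(self, read_original_data=self.read_matlab_files, **kwargs)
        Extractor.__init__(self, requires_training=False, **kwargs)

    def read_matlab_files(self, biofile, directory, extension):
        # placeholder: load the pre-computed feature matrix for this sample from a .mat file;
        # the parameter names here are an assumption about the callback interface
        raise NotImplementedError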
Example #7
    def __init__(
            self,
            block_size=12,  # one or two parameters for block size
            block_overlap=11,  # one or two parameters for block overlap
            number_of_dct_coefficients=45,
            normalize_blocks=True,
            normalize_dcts=True,
            auto_reduce_coefficients=False):

        # call base class constructor
        Extractor.__init__(
            self,
            block_size=block_size,
            block_overlap=block_overlap,
            number_of_dct_coefficients=number_of_dct_coefficients,
            normalize_blocks=normalize_blocks,
            normalize_dcts=normalize_dcts,
            auto_reduce_coefficients=auto_reduce_coefficients)

        # block parameters
        block_size = block_size if isinstance(block_size,
                                              (tuple, list)) else (block_size,
                                                                   block_size)
        block_overlap = block_overlap if isinstance(
            block_overlap, (tuple, list)) else (block_overlap, block_overlap)

        if block_size[0] < block_overlap[0] or block_size[1] < block_overlap[1]:
            raise ValueError(
                "The overlap '%s' is bigger than the block size '%s'. This won't work. Please check your setup!"
                % (block_overlap, block_size))
        if block_size[0] * block_size[1] <= number_of_dct_coefficients:
            if auto_reduce_coefficients:
                number_of_dct_coefficients = block_size[0] * block_size[1] - 1
            else:
                raise ValueError(
                    "You selected more coefficients %d than your blocks have %d. This won't work. Please check your setup!"
                    % (number_of_dct_coefficients,
                       block_size[0] * block_size[1]))

        self.dct_features = bob.ip.base.DCTFeatures(number_of_dct_coefficients,
                                                    block_size, block_overlap,
                                                    normalize_blocks,
                                                    normalize_dcts)
Example #8
  def __init__(
      self,
      # Gabor parameters
      gabor_directions = 8,
      gabor_scales = 5,
      gabor_sigma = 2. * math.pi,
      gabor_maximum_frequency = math.pi / 2.,
      gabor_frequency_step = math.sqrt(.5),
      gabor_power_of_k = 0,
      gabor_dc_free = True,

      # what kind of information to extract
      normalize_gabor_jets = True,

      # setup of the aligned grid
      eyes = None, # if set, the grid setup will be aligned to the eye positions {'leye' : LEFT_EYE_POS, 'reye' : RIGHT_EYE_POS},
      nodes_between_eyes = 4,
      nodes_along_eyes = 2,
      nodes_above_eyes = 3,
      nodes_below_eyes = 7,

      # setup of static grid
      node_distance = None,    # one or two integral values
      first_node = None,       # one or two integral values, or None -> automatically determined
  ):

    # call base class constructor
    Extractor.__init__(
        self,

        gabor_directions = gabor_directions,
        gabor_scales = gabor_scales,
        gabor_sigma = gabor_sigma,
        gabor_maximum_frequency = gabor_maximum_frequency,
        gabor_frequency_step = gabor_frequency_step,
        gabor_power_of_k = gabor_power_of_k,
        gabor_dc_free = gabor_dc_free,
        normalize_gabor_jets = normalize_gabor_jets,
        eyes = eyes,
        nodes_between_eyes = nodes_between_eyes,
        nodes_along_eyes = nodes_along_eyes,
        nodes_above_eyes = nodes_above_eyes,
        nodes_below_eyes = nodes_below_eyes,
        node_distance = node_distance,
        first_node = first_node
    )

    # create Gabor wavelet transform class
    self.gwt = bob.ip.gabor.Transform(
        number_of_scales = gabor_scales,
        number_of_directions = gabor_directions,
        sigma = gabor_sigma,
        k_max = gabor_maximum_frequency,
        k_fac = gabor_frequency_step,
        power_of_k = gabor_power_of_k,
        dc_free = gabor_dc_free
    )

    # create graph extractor
    if eyes is not None:
      self._aligned_graph = bob.ip.gabor.Graph(
          righteye = [int(e) for e in eyes['reye']],
          lefteye = [int(e) for e in eyes['leye']],
          between = int(nodes_between_eyes),
          along = int(nodes_along_eyes),
          above = int(nodes_above_eyes),
          below = int(nodes_below_eyes)
      )
    else:
      if node_distance is None:
        raise ValueError("Please specify either 'eyes' or the grid parameters 'node_distance' (and 'first_node')!")
      self._aligned_graph = None
      self._last_image_resolution = None
      self.first_node = first_node
      self.node_distance = node_distance
      if isinstance(self.node_distance, (int, float)):
        self.node_distance = (int(self.node_distance), int(self.node_distance))

    self.normalize_jets = normalize_gabor_jets
    self.trafo_image = None
Example #9
  def __init__(
      self,
      # Block setup
      block_size,    # one or two parameters for block size
      block_overlap = 0, # one or two parameters for block overlap
      # Gabor parameters
      gabor_directions = 8,
      gabor_scales = 5,
      gabor_sigma = 2. * math.pi,
      gabor_maximum_frequency = math.pi / 2.,
      gabor_frequency_step = math.sqrt(.5),
      gabor_power_of_k = 0,
      gabor_dc_free = True,
      use_gabor_phases = False,
      # LBP parameters
      lbp_radius = 2,
      lbp_neighbor_count = 8,
      lbp_uniform = True,
      lbp_circular = True,
      lbp_rotation_invariant = False,
      lbp_compare_to_average = False,
      lbp_add_average = False,
      # histogram options
      sparse_histogram = False,
      split_histogram = None
  ):
    # call base class constructor
    Extractor.__init__(
        self,

        block_size = block_size,
        block_overlap = block_overlap,
        gabor_directions = gabor_directions,
        gabor_scales = gabor_scales,
        gabor_sigma = gabor_sigma,
        gabor_maximum_frequency = gabor_maximum_frequency,
        gabor_frequency_step = gabor_frequency_step,
        gabor_power_of_k = gabor_power_of_k,
        gabor_dc_free = gabor_dc_free,
        use_gabor_phases = use_gabor_phases,
        lbp_radius = lbp_radius,
        lbp_neighbor_count = lbp_neighbor_count,
        lbp_uniform = lbp_uniform,
        lbp_circular = lbp_circular,
        lbp_rotation_invariant = lbp_rotation_invariant,
        lbp_compare_to_average = lbp_compare_to_average,
        lbp_add_average = lbp_add_average,
        sparse_histogram = sparse_histogram,
        split_histogram = split_histogram
    )

    # block parameters
    self.block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size)
    self.block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap)
    if self.block_size[0] < self.block_overlap[0] or self.block_size[1] < self.block_overlap[1]:
      raise ValueError("The overlap is bigger than the block size. This won't work. Please check your setup!")

    # Gabor wavelet transform class
    self.gwt = bob.ip.gabor.Transform(
        number_of_scales = gabor_scales,
        number_of_directions = gabor_directions,
        sigma = gabor_sigma,
        k_max = gabor_maximum_frequency,
        k_fac = gabor_frequency_step,
        power_of_k = gabor_power_of_k,
        dc_free = gabor_dc_free
    )
    self.trafo_image = None
    self.use_phases = use_gabor_phases

    self.lbp = bob.ip.base.LBP(
        neighbors = lbp_neighbor_count,
        radius = float(lbp_radius),
        circular = lbp_circular,
        to_average = lbp_compare_to_average,
        add_average_bit = lbp_add_average,
        uniform = lbp_uniform,
        rotation_invariant = lbp_rotation_invariant,
        border_handling = 'wrap'
    )

    self.split = split_histogram
    self.sparse = sparse_histogram
    if self.sparse and self.split:
      raise ValueError("Sparse histograms cannot be split! Check your setup!")
Example #10
    def __init__(
            self,
            win_length_ms=20.,  # 20 ms
            win_shift_ms=10.,  # 10 ms
            n_filters=40,
            f_min=0.0,  # 0 Hz
            f_max=8000,  # 8 kHz; this is an important value - normally it should be half of the sampling frequency
            pre_emphasis_coef=1.0,
            mel_scale=True,
            rect_filter=False,
            inverse_filter=False,
            delta_win=2,
            n_ceps=19,  # 0-->18,
            dct_norm=False,
            ssfc_features=False,
            scfc_features=False,
            scmc_features=False,
            with_delta=True,
            with_delta_delta=True,
            with_energy=False,
            normalize_spectrum=False,
            keep_only_deltas=True,
            log_filter=True,
            energy_filter=False,
            vad_filter="no_filter",  # we do apply any trim filter by default
            normalize_feature_vector=False,
            **kwargs):
        # call base class constructor with its set of parameters
        Extractor.__init__(self,
                           requires_training=False,
                           split_training_data_by_client=False,
                           **kwargs)
        # copy parameters
        self.win_length_ms = win_length_ms
        self.win_shift_ms = win_shift_ms
        self.n_filters = n_filters
        self.f_min = f_min
        self.f_max = f_max
        self.pre_emphasis_coef = pre_emphasis_coef
        self.mel_scale = mel_scale
        self.rect_filter = rect_filter
        self.inverse_filter = inverse_filter
        self.delta_win = delta_win
        self.n_ceps = n_ceps
        self.dct_norm = dct_norm
        self.ssfc_features = ssfc_features
        self.scfc_features = scfc_features
        self.scmc_features = scmc_features
        self.with_delta = with_delta
        self.with_delta_delta = with_delta_delta
        self.with_energy = with_energy
        self.normalize_spectrum = normalize_spectrum
        self.keep_only_deltas = keep_only_deltas
        self.log_filter = log_filter
        self.energy_filter = energy_filter
        self.vad_filter = vad_filter
        self.normalize_feature_vector = normalize_feature_vector

        # compute the size of the feature vector
        self.features_len = self.n_ceps
        if self.with_delta:
            self.features_len += self.n_ceps
        if self.with_delta_delta:
            self.features_len += self.n_ceps
Example #11
 def __init__(self, subspace_dimension):
   # We have to register that this function will need a training step
   Extractor.__init__(self, requires_training = True, subspace_dimension = subspace_dimension)
   self.subspace_dimension = subspace_dimension
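
requires_training = True tells the framework that this extractor needs a training step (presumably to learn the projection onto subspace_dimension dimensions) before it can be applied, which is done by implementing a train method. A rough, illustrative sketch of what such a method could look like (this is not the original project's code; numpy is assumed to be imported as in the other examples):

def train(self, training_data, extractor_file):
    # Illustrative sketch only: learn a PCA-like projection with
    # self.subspace_dimension components from the stacked training features.
    data = numpy.vstack(training_data)              # one row per training feature vector
    data = data - numpy.mean(data, axis=0)          # center the data
    _, _, vt = numpy.linalg.svd(data, full_matrices=False)
    self.projection = vt[:self.subspace_dimension]  # (subspace_dimension, n_features)
    # a real extractor would also serialize the projection into extractor_file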
Example #12
 def __init__(self, **kwargs):
   Extractor.__init__(self, requires_training=True)
   self.model = False
Example #13
 def __init__(self, **kwargs):
     Extractor.__init__(self, requires_training=True)
     self.model = False
Example #14
    def __init__(
            self,
            win_length_ms=20.,  # 20 ms
            win_shift_ms=10.,  # 10 ms
            n_filters=40,
            f_min=0.0,  # 0 Hz
            f_max=8000,  # 8 kHz; this is an important value - normally it should be half of the sampling frequency
            pre_emphasis_coef=1.0,
            mel_scale=False,
            rect_filter=False,
            inverse_filter=False,
            delta_win=2,
            n_ceps=19,  # 0-->18,
            dct_norm=False,
            d=1,
            p=3,
            k=7,
            with_delta=True,
            with_delta_delta=False,
            with_energy=False,
            normalize_spectrum=False,
            keep_only_deltas=True,
            log_filter=True,
            energy_filter=False,
            vad_filter="no_filter",  # we do apply any trim filter by default
            normalize_feature_vector=False,
            **kwargs
    ):
        # call base class constructor with its set of parameters
        Extractor.__init__(
            self,
            requires_training=False, split_training_data_by_client=False,
            **kwargs
        )
        # copy parameters
        self.win_length_ms = win_length_ms
        self.win_shift_ms = win_shift_ms
        self.n_filters = n_filters
        self.f_min = f_min
        self.f_max = f_max
        self.pre_emphasis_coef = pre_emphasis_coef
        self.mel_scale = mel_scale
        self.rect_filter = rect_filter
        self.inverse_filter = inverse_filter
        self.delta_win = delta_win
        self.n_ceps = n_ceps
        self.dct_norm = dct_norm
        self.d = d
        self.p = p
        self.k = k
        self.with_delta = with_delta
        self.with_delta_delta = with_delta_delta
        self.with_energy = with_energy
        self.normalize_spectrum = normalize_spectrum
        self.keep_only_deltas = keep_only_deltas
        self.log_filter = log_filter
        self.energy_filter = energy_filter
        self.vad_filter = vad_filter
        self.normalize_feature_vector = normalize_feature_vector

        # compute the size of the feature vector
        self.features_len = self.n_ceps
        if self.with_delta:
            self.features_len += self.n_ceps
        if self.with_delta_delta:
            self.features_len += self.n_ceps


    def normalize_features(self, features):
        # zero-mean / unit-variance normalization along the time axis
        mean = numpy.mean(features, axis=0)
        std = numpy.std(features, axis=0)
        return numpy.divide(features - mean, std)

    def compute_ceps(self, rate, data):
        # configure the bob.ap cepstral extractor with the parameters stored in __init__
        ceps = bob.ap.Ceps(rate, self.win_length_ms, self.win_shift_ms, self.n_filters, self.n_ceps, self.f_min,
                           self.f_max, self.delta_win, self.pre_emphasis_coef)
        ceps.dct_norm = self.dct_norm
        ceps.mel_scale = self.mel_scale
        ceps.rect_filter = self.rect_filter
        ceps.inverse_filter = self.inverse_filter
        ceps.with_energy = self.with_energy
        ceps.with_delta = self.with_delta
        ceps.with_delta_delta = self.with_delta_delta
        ceps.normalize_spectrum = self.normalize_spectrum
        ceps.log_filter = self.log_filter
        ceps.energy_filter = self.energy_filter

        cepstral_features = ceps(data)

        if self.keep_only_deltas:
            # drop the static coefficients and keep only the delta part
            cepstral_features = cepstral_features[:, self.n_ceps:]
        return cepstral_features

    def shifted_delta_cepstral(self, cep, d, p, k):
        # pad the cepstral matrix at both ends so the shifted windows never run out of frames
        y = numpy.r_[numpy.resize(cep[0, :], (d, cep.shape[1])), cep, numpy.resize(cep[-1, :], (k * 3 + d, cep.shape[1]))]

        # the frames passed in are already delta coefficients (see keep_only_deltas above),
        # so the padded matrix itself serves as the delta stream
        delta = y
        sdc = numpy.empty((cep.shape[0], cep.shape[1] * k))

        idx = numpy.zeros(delta.shape[0], dtype='bool')
        for ii in range(k):
            idx[d + ii * p] = True
        for ff in range(len(cep)):
            sdc[ff, :] = delta[idx, :].reshape(1, -1)
            idx = numpy.roll(idx, 1)
        return numpy.hstack((cep, sdc))

    def __call__(self, input_data):
        # input_data is expected to carry the sampling rate and the audio samples
        rate, data = input_data[0], input_data[1]
        ceps = self.compute_ceps(rate, data)
        return self.shifted_delta_cepstral(ceps, self.d, self.p, self.k)
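
The shifted-delta-cepstra step keeps the incoming cep.shape[1] coefficients per frame and appends k shifted slices of the delta stream, so each output frame has cep.shape[1] * (k + 1) values. With the defaults above (n_ceps = 19, keep_only_deltas = True, k = 7, and assuming a frame is laid out as static coefficients followed by their deltas, which is what the keep_only_deltas slice implies) that gives 19 * 8 = 152 values per frame:

n_ceps, k = 19, 7                 # defaults from the __init__ above
cep_width = n_ceps                # keep_only_deltas leaves one delta block per frame
sdc_width = cep_width * k         # k shifted slices of the (padded) delta stream
assert cep_width + sdc_width == cep_width * (k + 1) == 152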
Example #15
 def __init__(self, subspace_dimension):
     # We have to register that this function will need a training step
     Extractor.__init__(self, requires_training=True, subspace_dimension=subspace_dimension)
     self.subspace_dimension = subspace_dimension
Example #16
    def __init__(
            self,
            # Block setup
            block_size,  # one or two parameters for block size
            block_overlap=0,  # one or two parameters for block overlap
            # Gabor parameters
            gabor_directions=8,
            gabor_scales=5,
            gabor_sigma=2. * math.pi,
            gabor_maximum_frequency=math.pi / 2.,
            gabor_frequency_step=math.sqrt(.5),
            gabor_power_of_k=0,
            gabor_dc_free=True,
            use_gabor_phases=False,
            # LBP parameters
            lbp_radius=2,
            lbp_neighbor_count=8,
            lbp_uniform=True,
            lbp_circular=True,
            lbp_rotation_invariant=False,
            lbp_compare_to_average=False,
            lbp_add_average=False,
            # histogram options
            sparse_histogram=False,
            split_histogram=None):
        # call base class constructor
        Extractor.__init__(self,
                           block_size=block_size,
                           block_overlap=block_overlap,
                           gabor_directions=gabor_directions,
                           gabor_scales=gabor_scales,
                           gabor_sigma=gabor_sigma,
                           gabor_maximum_frequency=gabor_maximum_frequency,
                           gabor_frequency_step=gabor_frequency_step,
                           gabor_power_of_k=gabor_power_of_k,
                           gabor_dc_free=gabor_dc_free,
                           use_gabor_phases=use_gabor_phases,
                           lbp_radius=lbp_radius,
                           lbp_neighbor_count=lbp_neighbor_count,
                           lbp_uniform=lbp_uniform,
                           lbp_circular=lbp_circular,
                           lbp_rotation_invariant=lbp_rotation_invariant,
                           lbp_compare_to_average=lbp_compare_to_average,
                           lbp_add_average=lbp_add_average,
                           sparse_histogram=sparse_histogram,
                           split_histogram=split_histogram)

        # block parameters
        self.block_size = block_size if isinstance(block_size,
                                                   (tuple,
                                                    list)) else (block_size,
                                                                 block_size)
        self.block_overlap = block_overlap if isinstance(
            block_overlap, (tuple, list)) else (block_overlap, block_overlap)
        if self.block_size[0] < self.block_overlap[0] or self.block_size[
                1] < self.block_overlap[1]:
            raise ValueError(
                "The overlap is bigger than the block size. This won't work. Please check your setup!"
            )

        # Gabor wavelet transform class
        self.gwt = bob.ip.gabor.Transform(
            number_of_scales=gabor_scales,
            number_of_directions=gabor_directions,
            sigma=gabor_sigma,
            k_max=gabor_maximum_frequency,
            k_fac=gabor_frequency_step,
            power_of_k=gabor_power_of_k,
            dc_free=gabor_dc_free)
        self.trafo_image = None
        self.use_phases = use_gabor_phases

        self.lbp = bob.ip.base.LBP(neighbors=lbp_neighbor_count,
                                   radius=float(lbp_radius),
                                   circular=lbp_circular,
                                   to_average=lbp_compare_to_average,
                                   add_average_bit=lbp_add_average,
                                   uniform=lbp_uniform,
                                   rotation_invariant=lbp_rotation_invariant,
                                   border_handling='wrap')

        self.split = split_histogram
        self.sparse = sparse_histogram
        if self.sparse and self.split:
            raise ValueError(
                "Sparse histograms cannot be split! Check your setup!")
Example #17
    def __init__(
            self,
            win_length_ms=20.,  # 20 ms
            win_shift_ms=10.,  # 10 ms
            n_filters=40,
            f_min=0.0,  # 0 Hz
            f_max=8000,  # 8 kHz; this is an important value - normally it should be half of the sampling frequency
            pre_emphasis_coef=1.0,
            mel_scale=True,
            rect_filter=False,
            inverse_filter=False,
            delta_win=2,
            n_ceps=19,  # 0-->18,
            dct_norm=False,
            ssfc_features=False,
            scfc_features=False,
            scmc_features=False,
            with_delta=True,
            with_delta_delta=True,
            with_energy=False,
            normalize_spectrum=False,
            keep_only_deltas=True,
            log_filter=True,
            energy_filter=False,
            vad_filter="no_filter",  # we do apply any trim filter by default
            normalize_feature_vector=False,
            **kwargs
    ):
        # call base class constructor with its set of parameters
        Extractor.__init__(
            self,
            requires_training=False, split_training_data_by_client=False,
            **kwargs
        )
        # copy parameters
        self.win_length_ms = win_length_ms
        self.win_shift_ms = win_shift_ms
        self.n_filters = n_filters
        self.f_min = f_min
        self.f_max = f_max
        self.pre_emphasis_coef = pre_emphasis_coef
        self.mel_scale = mel_scale
        self.rect_filter = rect_filter
        self.inverse_filter = inverse_filter
        self.delta_win = delta_win
        self.n_ceps = n_ceps
        self.dct_norm = dct_norm
        self.ssfc_features = ssfc_features
        self.scfc_features = scfc_features
        self.scmc_features = scmc_features
        self.with_delta = with_delta
        self.with_delta_delta = with_delta_delta
        self.with_energy = with_energy
        self.normalize_spectrum = normalize_spectrum
        self.keep_only_deltas = keep_only_deltas
        self.log_filter = log_filter
        self.energy_filter = energy_filter
        self.vad_filter = vad_filter
        self.normalize_feature_vector = normalize_feature_vector

        # compute the size of the feature vector
        self.features_len = self.n_ceps
        if self.with_delta:
            self.features_len += self.n_ceps
        if self.with_delta_delta:
            self.features_len += self.n_ceps