Example #1
import os
import tempfile

import nose.tools

import dcase_util
from dcase_util.data import Sequencer  # assumed import location for Sequencer


def test_save():

    # Load audio and extract mel features
    param = {
        'feature_extraction': {
            'fs': 44100,
            'win_length_seconds': 0.04,
            'hop_length_seconds': 0.02,
            'spectrogram_type': 'magnitude',
            'window_type': 'hann_symmetric',
            'n_mels': 40,  # Number of MEL bands used
            'n_fft': 2048,  # FFT length
            'fmin': 0,  # Minimum frequency when constructing MEL bands
            'fmax': 22050,  # Maximum frequency when constructing MEL bands
            'htk': True,  # Switch for HTK-styled MEL-frequency equation
        },
    }

    chain = dcase_util.processors.ProcessingChain([
        {
            'processor_name': 'dcase_util.processors.MonoAudioReadingProcessor',
            'init_parameters': {
                'fs': param['feature_extraction']['fs']
            }
        },
        {
            'processor_name': 'dcase_util.processors.MelExtractorProcessor',
            'init_parameters': param['feature_extraction']
        }
    ])
    container = chain.process(
        filename=dcase_util.utils.Example.audio_filename()
    )

    tmp = tempfile.NamedTemporaryFile('r+',
                                      suffix='.cpickle',
                                      dir=tempfile.gettempdir(),
                                      delete=False)
    try:
        sequencer = Sequencer(
            sequence_length=10,
            hop_length=10,
        ).save(filename=tmp.name).load()

        sequenced_data = sequencer.sequence(data=container)
        nose.tools.eq_(sequenced_data.length, 10)
        nose.tools.eq_(sequenced_data.vector_length, 40)
        nose.tools.eq_(sequenced_data.data.shape, (40, 10, 50))

    finally:
        try:
            tmp.close()
            os.unlink(tmp.name)
        except OSError:
            pass
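
The chained .save(filename=tmp.name).load() call above persists the Sequencer parameters to the temporary .cpickle file and immediately reads them back. Below is a minimal sketch of restoring such a saved Sequencer in a later session; it assumes Sequencer.load accepts a filename argument like other dcase_util object containers, and the 'sequencer.cpickle' path is hypothetical.

import dcase_util
from dcase_util.data import Sequencer

# Restore a previously saved Sequencer (hypothetical filename).
sequencer = Sequencer().load(filename='sequencer.cpickle')

# Apply it to the example feature container shipped with dcase_util.
features = dcase_util.utils.Example.feature_container()
sequenced = sequencer.sequence(data=features)

# With sequence_length=10 and hop_length=10 saved above, this should give (40, 10, 50).
print(sequenced.data.shape)
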
Example #2
import numpy

# Presumed module-level imports for this excerpt from dcase_util.processors
from dcase_util.data import Sequencer
from dcase_util.processors import ProcessingChainItemType, SequencingProcessor


class AudioSequencingProcessor(SequencingProcessor):
    """Frame blocking processor"""
    input_type = ProcessingChainItemType.AUDIO  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self,
                 sequence_length=44100,
                 hop_length=None,
                 padding=None,
                 shift_border='roll',
                 shift=0,
                 required_data_amount_per_segment=0.9,
                 **kwargs):
        """__init__ method.

        Parameters
        ----------
        sequence_length : int
            Sequence length
            Default value 44100

        hop_length : int
            Hop length used when forming sequences; if None, the hop length equals sequence_length (non-overlapping sequences).
            Default value None

        padding : str
            How data is treated at the boundaries [None, 'zero', 'repeat']
            Default value None

        shift_border : string, ['roll', 'shift']
            Sequence border handling when doing temporal shifting.
            Default value roll

        shift : int
            Sequencing grid shift.
            Default value 0

        required_data_amount_per_segment : float [0,1]
            Fraction of valid data items a segment must contain to be considered valid. Use this parameter to
            filter out partially filled segments.
            Default value 0.9

        """

        # Inject initialization parameters back to kwargs
        kwargs.update({
            'sequence_length': sequence_length,
            'hop_length': hop_length,
            'padding': padding,
            'shift': shift,
            'shift_border': shift_border,
            'required_data_amount_per_segment': required_data_amount_per_segment
        })

        # Run super init to call init of mixins too
        super(AudioSequencingProcessor, self).__init__(**kwargs)

        self.sequencer = Sequencer(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataContainer
            Data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataMatrix3DContainer

        """
        from dcase_util.containers import AudioContainer, DataMatrix2DContainer

        if isinstance(data, AudioContainer):
            audio_data = data.data
            if data.channels == 1:
                audio_data = audio_data[numpy.newaxis, :]

            # Do processing
            container = self.sequencer.sequence(
                data=DataMatrix2DContainer(
                    audio_data,
                    time_resolution=1 / float(data.fs)
                ),
                **kwargs
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Update current processing parameters into chain item
                processing_chain_item.update({'process_parameters': kwargs})

                # Push chain item into processing chain stored in the container
                container.processing_chain.push_processor(
                    **processing_chain_item)

            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__, input_type=self.input_type)

            self.logger.exception(message)
            raise ValueError(message)
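
A minimal usage sketch for the processor above, assuming the standard dcase_util processing chain API and the example audio file shipped with the library; the one-second block length is an arbitrary illustration value, not taken from the class itself.

import dcase_util

chain = dcase_util.processors.ProcessingChain([
    {
        'processor_name': 'dcase_util.processors.MonoAudioReadingProcessor',
        'init_parameters': {
            'fs': 44100
        }
    },
    {
        'processor_name': 'dcase_util.processors.AudioSequencingProcessor',
        'init_parameters': {
            'sequence_length': 44100,  # one-second blocks at 44.1 kHz
            'hop_length': 44100        # non-overlapping blocks
        }
    }
])
blocks = chain.process(filename=dcase_util.utils.Example.audio_filename())

# Mono audio is reshaped to (1, samples) before sequencing, so the result has
# shape (channels, sequence_length, number_of_blocks).
print(blocks.data.shape)
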
Example #3
import nose.tools
import numpy

import dcase_util
from dcase_util.data import Sequencer  # assumed import location for Sequencer


def test_sequence():
    # Get data in container
    container = dcase_util.containers.FeatureContainer(
        data=numpy.repeat(numpy.arange(0, 100).reshape(1, -1), 3, axis=0),
        time_resolution=1
    )

    # Initialize sequencer, 10 frames long sequences, non-overlapping sequences
    sequencer = Sequencer(sequence_length=10, hop_length=10)
    sequenced_data = sequencer.sequence(data=container)
    # Check shape
    nose.tools.eq_(sequenced_data.length, 10)
    nose.tools.eq_(sequenced_data.vector_length, 3)
    nose.tools.eq_(sequenced_data.data.shape, (3, 10, 10))

    # Check content
    numpy.testing.assert_equal(sequenced_data.data[0, :, 0],
                               numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
    numpy.testing.assert_equal(
        sequenced_data.data[0, :, 1],
        numpy.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    numpy.testing.assert_equal(
        sequenced_data.data[0, :, 2],
        numpy.array([20, 21, 22, 23, 24, 25, 26, 27, 28, 29]))

    # Initialize sequencer, 10 frames long sequences, 1 frame hop
    sequencer = Sequencer(sequence_length=10, hop_length=1)
    sequenced_data = sequencer.sequence(data=container)
    # Check shape
    nose.tools.eq_(sequenced_data.length, 10)
    nose.tools.eq_(sequenced_data.vector_length, 3)
    nose.tools.eq_(sequenced_data.data.shape, (3, 10, 91))

    # Check content
    numpy.testing.assert_equal(sequenced_data.data[0, :, 0],
                               numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 1],
                               numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 2],
                               numpy.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]))

    # Initialize sequencer, 10 frames long sequences, 1 frame hop
    sequencer = Sequencer(sequence_length=10, hop_length=1)
    # Shift with one frame (+1 from original)
    sequencer.increase_shifting(1)
    sequenced_data = sequencer.sequence(data=container)
    numpy.testing.assert_equal(sequenced_data.data[0, :, 0],
                               numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 1],
                               numpy.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 2],
                               numpy.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))

    # Shift with one frame (+2 from original)
    sequencer.increase_shifting(1)
    sequenced_data = sequencer.sequence(data=container)
    numpy.testing.assert_equal(sequenced_data.data[0, :, 0],
                               numpy.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 1],
                               numpy.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 2],
                               numpy.array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13]))

    # Initialize sequencer, 10 frames long sequences, 1 frame hop, shifting border handling mode 'shift'
    sequencer = Sequencer(sequence_length=10,
                          hop_length=1,
                          shift_border='shift')
    sequencer.increase_shifting(1)
    sequenced_data = sequencer.sequence(data=container)
    numpy.testing.assert_equal(sequenced_data.data[0, :, 0],
                               numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 1],
                               numpy.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]))
    numpy.testing.assert_equal(sequenced_data.data[0, :, 2],
                               numpy.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))

    # Get feature data
    container = dcase_util.utils.Example.feature_container()

    sequencer = Sequencer(
        sequence_length=10,
        hop_length=10,
    )
    sequenced_data = sequencer.sequence(data=container)
    nose.tools.eq_(sequenced_data.length, 10)
    nose.tools.eq_(sequenced_data.vector_length, 40)
    nose.tools.eq_(sequenced_data.data.shape, (40, 10, 50))

    sequencer = Sequencer(
        sequence_length=10,
        hop_length=1,
    )
    sequenced_data = sequencer.sequence(data=container)

    nose.tools.eq_(sequenced_data.length, 10)
    nose.tools.eq_(sequenced_data.vector_length, 40)
    nose.tools.eq_(sequenced_data.data.shape, (40, 10, 492))
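
The third dimension of each asserted shape is the number of sequences placed on the sequencing grid. Without padding and with zero shift this is floor((frames - sequence_length) / hop_length) + 1, which also implies the example feature container used above has 501 frames. A small check of the counts asserted in the test:

def expected_sequence_count(frames, sequence_length, hop_length):
    # Number of full windows that fit on the sequencing grid (no padding, zero shift).
    return (frames - sequence_length) // hop_length + 1

print(expected_sequence_count(100, 10, 10))  # 10  -> shape (3, 10, 10)
print(expected_sequence_count(100, 10, 1))   # 91  -> shape (3, 10, 91)
print(expected_sequence_count(501, 10, 10))  # 50  -> shape (40, 10, 50)
print(expected_sequence_count(501, 10, 1))   # 492 -> shape (40, 10, 492)
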
Example #4
# Presumed module-level imports for this excerpt from dcase_util.processors
from dcase_util.containers import RepositoryContainer
from dcase_util.data import Sequencer
from dcase_util.processors import Processor, ProcessingChainItemType


class RepositorySequencingProcessor(Processor):
    """Data sequencing processor"""
    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_REPOSITORY  #: Output data type

    def __init__(self,
                 sequence_length=10,
                 hop_length=None,
                 padding=None,
                 shift=0,
                 shift_border='roll',
                 required_data_amount_per_segment=0.9,
                 **kwargs):
        """__init__ method.

        Parameters
        ----------
        sequence_length : int
            Sequence length
            Default value 10

        hop_length : int
            Hop length used when forming sequences; if None, the hop length equals sequence_length (non-overlapping sequences).
            Default value None

        padding : str
            How data is treated at the boundaries [None, 'zero', 'repeat']
            Default value None

        shift_border : string, ['roll', 'shift']
            Sequence border handling when doing temporal shifting.
            Default value roll

        shift : int
            Sequencing grid shift.
            Default value 0

        required_data_amount_per_segment : float [0,1]
            Fraction of valid data items a segment must contain to be considered valid. Use this parameter to
            filter out partially filled segments.
            Default value 0.9

        """

        # Inject initialization parameters back to kwargs
        kwargs.update({
            'sequence_length': sequence_length,
            'hop_length': hop_length,
            'padding': padding,
            'shift': shift,
            'shift_border': shift_border,
            'required_data_amount_per_segment': required_data_amount_per_segment
        })

        # Run super init to call init of mixins too
        super(RepositorySequencingProcessor, self).__init__(**kwargs)

        self.sequencer = Sequencer(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataRepository
            Data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataMatrix3DContainer

        """

        if isinstance(data, RepositoryContainer):
            # Loop over all labels and streams in the data repository
            for label in data:
                for stream_id in data[label]:
                    # Sequence the container and store it back into the repository
                    data.set_container(
                        label=label,
                        stream_id=stream_id,
                        container=self.sequencer.sequence(
                            data=data.get_container(label=label, stream_id=stream_id),
                            **kwargs
                        )
                    )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Push chain item into processing chain stored in the container
                data.processing_chain.push_processor(**processing_chain_item)

            return data

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__, input_type=self.input_type)

            self.logger.exception(message)
            raise ValueError(message)
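
A minimal usage sketch for the repository processor above, assuming the standard dcase_util chain API; the RepositoryFeatureExtractorProcessor step and its 'mel' parameter block follow the library's tutorial-style configuration and are assumptions here rather than part of the class shown above.

import dcase_util

chain = dcase_util.processors.ProcessingChain([
    {
        'processor_name': 'dcase_util.processors.MonoAudioReadingProcessor',
        'init_parameters': {
            'fs': 44100
        }
    },
    {
        'processor_name': 'dcase_util.processors.RepositoryFeatureExtractorProcessor',
        'init_parameters': {
            'parameters': {
                'mel': {}  # extract mel features into the repository under label 'mel'
            }
        }
    },
    {
        'processor_name': 'dcase_util.processors.RepositorySequencingProcessor',
        'init_parameters': {
            'sequence_length': 10,
            'hop_length': 10
        }
    }
])

# Every container in the repository (here, the 'mel' features) is sequenced in place.
repository = chain.process(filename=dcase_util.utils.Example.audio_filename())
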