    def test_factory(self):
        time_mode_list = ["global", "event", "segment", "framewise"]
        data_mode_list = ["value", "label"]

        for time_mode in time_mode_list:
            for data_mode in data_mode_list:
                result = AnalyzerResult(data_mode, time_mode)
                self.assertEqual(result.data_mode, data_mode)
                self.assertEqual(result.time_mode, time_mode)
                self.assertEqual(result.keys(), ["id_metadata", "data_object", "audio_metadata", "parameters"])
Example no. 2
    def test_factory(self):
        time_mode_list = ['global', 'event', 'segment', 'framewise']
        data_mode_list = ['value', 'label']

        for time_mode in time_mode_list:
            for data_mode in data_mode_list:
                result = AnalyzerResult(data_mode, time_mode)
                self.assertEqual(result.data_mode, data_mode)
                self.assertEqual(result.time_mode, time_mode)
                self.assertEqual(result.keys(), ['id_metadata',
                                                 'data_object',
                                                 'audio_metadata',
                                                 'parameters'])
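
The two factory tests above only verify the container layout. As a minimal sketch of the same API in direct use (assuming AnalyzerResult is imported from timeside.core.analyzer, as in Example no. 5 below, and that a small NumPy value is acceptable, as in the later tests):

import numpy as np
from timeside.core.analyzer import AnalyzerResult

# Empty container for a single global value
result = AnalyzerResult(data_mode='value', time_mode='global')

# The four top-level sections checked by the factory tests
print(result.keys())
# ['id_metadata', 'data_object', 'audio_metadata', 'parameters']

# Populate it the same way the later tests do
result.data_object.value = np.random.rand(1)
result.audio_metadata.start = 0
result.audio_metadata.duration = 20

# Serialize, as Example no. 5 does for its REST representation
print(result.as_dict())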
Example no. 3
    def to_representation(self, obj):
        request = self.context['request']
        self.subprocessor_id = str(request.GET.get('subprocessor_id', ''))
        self.start = float(request.GET.get('start', 0))
        self.stop = float(request.GET.get('stop', -1))
        self.width = int(float(request.GET.get('width', 1024)))
        self.height = int(float(request.GET.get('height', 128)))

        if not self.subprocessor_id:
            self.subprocessor_id = self.get_subprocessor_id(obj)

        if not obj.has_hdf5():
            raise serializers.ValidationError(
                "result must have an hdf5 file to be visualized")

        hdf5_result = h5py.File(obj.hdf5.path, 'r').get(self.subprocessor_id)
        result = AnalyzerResult().from_hdf5(hdf5_result)
        duration = hdf5_result['audio_metadata'].attrs['duration']
        samplerate = hdf5_result['data_object']['frame_metadata'].attrs[
            'samplerate']

        if self.start < 0:
            self.start = 0
        if self.start > duration:
            raise serializers.ValidationError(
                "start must be less than duration")
        if self.stop == -1:
            self.stop = duration

        # cap stop at the audio duration in all cases
        if self.stop > duration:
            self.stop = duration

        # following same cap_value for width
        # as for waveform's nb_pixel serialization
        cap_value = int(samplerate * abs(self.stop - self.start) / 2)
        if self.width > cap_value:
            self.width = cap_value

        from io import BytesIO
        pil_image = result._render_PIL(size=(self.width, self.height),
                                       dpi=80,
                                       xlim=(self.start, self.stop))
        image_buffer = BytesIO()
        pil_image.save(image_buffer, 'PNG')
        return image_buffer.getvalue()
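
Note that the serializer above opens the HDF5 file but never closes it. A hedged variant of the same access pattern, wrapped in h5py's context manager so the handle is released (the helper name load_result is illustrative, and it assumes AnalyzerResult.from_hdf5 reads the data eagerly):

import h5py
from timeside.core.analyzer import AnalyzerResult

def load_result(hdf5_path, subprocessor_id):
    # Open the store read-only and close it when done
    with h5py.File(hdf5_path, 'r') as hdf5_file:
        hdf5_result = hdf5_file.get(subprocessor_id)
        # Assumes from_hdf5 copies the data into memory before the file closes
        result = AnalyzerResult().from_hdf5(hdf5_result)
        duration = hdf5_result['audio_metadata'].attrs['duration']
        samplerate = hdf5_result['data_object']['frame_metadata'].attrs['samplerate']
    return result, duration, samplerate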
Example no. 4
    def to_representation(self, obj):
        subprocessor_id = self.context.get('request').query_params.get('id')

        start = float(self.context.get('request').query_params.get('start', 0))
        stop = float(self.context.get('request').query_params.get('stop', -1))
        width = int(self.context.get(
            'request').query_params.get('width', 1024))
        height = int(self.context.get(
            'request').query_params.get('height', 128))

        import h5py
        hdf5_result = h5py.File(obj.hdf5.path, 'r').get(subprocessor_id)
        from timeside.core.analyzer import AnalyzerResult
        result = AnalyzerResult().from_hdf5(hdf5_result)
        duration = hdf5_result['audio_metadata'].attrs['duration']

        if start < 0:
            start = 0
        if start > duration:
            raise serializers.ValidationError(
                "start must be less than duration")
        if stop == -1:
            stop = duration

        # cap stop at the audio duration in all cases
        if stop > duration:
            stop = duration

        if True:
            # if result.data_object.y_value.size:

            from io import BytesIO
            pil_image = result._render_PIL(
                size=(width, height), dpi=80, xlim=(start, stop))
            image_buffer = BytesIO()
            pil_image.save(image_buffer, 'PNG')
            return image_buffer.getvalue()
        else:
            return result.to_json()
Example no. 5
    def to_representation(self, obj):
        subprocessor_id = self.context.get('request').query_params.get('id')
        import h5py
        hdf5_file = h5py.File(obj.hdf5.path, 'r')
        if subprocessor_id:
            hdf5_result = hdf5_file.get(subprocessor_id)
        else:
            hdf5_result = hdf5_file.visit(hdf5_file.get)
        from timeside.core.analyzer import AnalyzerResult
        result = AnalyzerResult().from_hdf5(hdf5_result)
        return result.as_dict()
Example no. 6
    def test_GlobalValueResult(self):
        "Data methods for Global & Value Result"
        self.time_mode = 'global'
        self.data_mode = 'value'
        self.result = AnalyzerResult(self.data_mode, self.time_mode)
        # Audio
        self.audio_start = np.random.rand(1) * 50
        self.audio_duration = np.random.rand(1) * 500
        # Expected data
        self.data = np.random.rand(1)
        self.time = 0
        self.duration = self.audio_duration
        # Set data in result
        self.result.data_object.value = self.data
        self.result.audio_metadata.start = self.audio_start
        self.result.audio_metadata.duration = self.audio_duration
Example no. 7
    def test_EventLabelResult(self):
        "Data methods for Event & Label Result"
        self.time_mode = 'event'
        self.data_mode = 'label'
        self.result = AnalyzerResult(self.data_mode, self.time_mode)
        # Audio
        self.audio_start = np.random.rand(1) * 50
        # Expected data
        nb_val = 200
        self.data = np.random.randint(10, size=nb_val)
        self.time = np.random.rand(1, nb_val) * 500
        self.duration = np.zeros(nb_val)
        # Set data in result
        self.result.data_object.label = self.data
        self.result.data_object.time = self.time
        self.result.audio_metadata.start = self.audio_start
Example no. 8
    def test_EventValueResult(self):
        "Data methods for Event & Value Result"
        self.time_mode = 'event'
        self.data_mode = 'value'
        self.result = AnalyzerResult(self.data_mode, self.time_mode)
        # Audio
        self.audio_start = np.random.rand(1) * 50
        # Expected data
        nb_val = 200
        self.data = np.random.randn(nb_val, 4)
        self.time = np.random.rand(1, nb_val) * 500
        self.duration = np.zeros(nb_val)
        # Set data in result
        self.result.data_object.value = self.data
        self.result.data_object.time = self.time
        self.result.audio_metadata.start = self.audio_start
Example no. 9
    def setUp(self):
        self.result = AnalyzerResult(data_mode='value',
                                     time_mode='framewise')

        from datetime import datetime
        res_date = datetime.now().replace(microsecond=0).isoformat(' ')
        self.result.id_metadata = dict(date=res_date,
                                       version=__version__,
                                       author='TimeSide',
                                       id="foo_bar",
                                       name="Foo bar",
                                       unit="foo")

        self.result.audio_metadata = dict(uri='Foo.wav',
                                          start=0, duration=20,
                                          channels=2)
Example no. 10
    def test_FrameLabelResult(self):
        "Data methods for Framewise & Label Result"
        self.time_mode = 'framewise'
        self.data_mode = 'label'
        self.result = AnalyzerResult(self.data_mode, self.time_mode)
        # Audio
        self.audio_start = np.random.rand(1) * 50
        # Expected data
        nb_frames = 200
        blocksize = 1024
        stepsize = 512
        samplerate = 44100
        self.data = np.random.randint(10, size=nb_frames)
        self.duration = blocksize / samplerate * np.ones(nb_frames)
        self.time = np.arange(0, nb_frames*stepsize, stepsize) / samplerate
        # Set data in result
        self.result.data_object.label = self.data
        self.result.data_object.frame_metadata.blocksize = blocksize
        self.result.data_object.frame_metadata.stepsize = stepsize
        self.result.data_object.frame_metadata.samplerate = samplerate
        self.result.audio_metadata.start = self.audio_start
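
For reference, the framewise timing in the test above follows directly from the frame parameters; a small worked sketch with the same values:

import numpy as np

blocksize = 1024
stepsize = 512
samplerate = 44100
nb_frames = 200

# Each frame starts one stepsize after the previous one...
time = np.arange(0, nb_frames * stepsize, stepsize) / samplerate
# ...and lasts one blocksize worth of samples
duration = blocksize / samplerate * np.ones(nb_frames)

print(time[:3])      # [0.         0.01160998 0.02321995]
print(duration[0])   # about 0.0232 seconds per frame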
Example no. 11
    def tearDown(self):
        result = AnalyzerResult(self.data_mode, self.time_mode)
        self.assertEqual(self.data_object_keys, result.data_object.keys())