Example No. 1
    def execute(self, job_info):

        self._exporter.begin_exports()

        value_tuples = self._create_clip_query_values_iterator()

        for station, mic_output, date, detector in value_tuples:

            clips = _get_clips(
                station, mic_output, date, detector, self._annotation_name,
                self._annotation_value, self._tag_name)

            count = clips.count()
            count_text = text_utils.create_count_text(count, 'clip')

            _logger.info(
                f'Exporter will visit {count_text} for station '
                f'"{station.name}", mic output "{mic_output.name}", '
                f'date {date}, and detector {detector.name}.')

            try:
                _export_clips(clips, self._exporter)

            except Exception:
                _logger.error(
                    'Clip export failed. See below for exception traceback.')
                raise

        self._exporter.end_exports()

        return True
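Every example on this page builds its log messages with text_utils.create_count_text. The helper's implementation is not shown here; judging from how its results are worded into the messages, it presumably returns a pluralized count string such as '1 clip' or '2 clips'. A minimal sketch of that assumed behavior (only the call signature is taken from the examples, the body is a guess):

def create_count_text(count, name):
    # Assumed behavior: '0 clips', '1 clip', '2 clips', ...
    # name is a singular noun such as 'clip' or 'detector', as in the calls above.
    suffix = '' if count == 1 else 's'
    return f'{count} {name}{suffix}'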
Example No. 2
    def execute(self, job_info):

        self._exporter.begin_exports()

        annotation_name, annotation_value = \
            model_utils.get_clip_query_annotation_data(
                'Classification', self._classification)

        value_tuples = self._create_clip_query_values_iterator()

        for detector, station, mic_output, date in value_tuples:

            clips = _get_clips(station, mic_output, detector, date,
                               annotation_name, annotation_value)

            count = clips.count()
            count_text = text_utils.create_count_text(count, 'clip')

            _logger.info(
                ('Exporter will visit {} for detector "{}", station "{}", '
                 'mic output "{}", and date {}.').format(
                     count_text, detector.name, station.name, mic_output.name,
                     date))

            try:
                _export_clips(clips, self._exporter)

            except Exception:
                _logger.error(
                    'Clip export failed. See below for exception traceback.')
                raise

        self._exporter.end_exports()

        return True
Example No. 3
    def execute(self, job_info):

        self._exporter.begin_exports()

        value_tuples = self._create_clip_query_values_iterator()

        for detector, station, mic_output, date in value_tuples:

            clips = _get_clips(station, mic_output, detector, date)

            count = clips.count()
            count_text = text_utils.create_count_text(count, 'clip')

            _logger.info((
                'Exporter will visit {} for detector "{}", station "{}", '
                'mic output "{}", and date {}.').format(
                    count_text, detector.name, station.name, mic_output.name,
                    date))

            try:
                _export_clips(clips, self._exporter)

            except Exception:
                _logger.error(
                    'Clip export failed. See below for exception traceback.')
                raise

        self._exporter.end_exports()

        return True
Example No. 4
    def _log_detection_performance(
            self, num_detectors, num_channels, interval_duration,
            processing_time):
        
        format_ = text_utils.format_number
        
        dur = format_(interval_duration)
        time = format_(processing_time)
        
        detectors_text = text_utils.create_count_text(
            num_detectors, 'detector')

        message = (
            '        Ran {} on {} seconds of {}-channel '
            'audio in {} seconds').format(
                detectors_text, dur, num_channels, time)
        
        if processing_time != 0:
            total_duration = num_detectors * num_channels * interval_duration
            speedup = format_(total_duration / processing_time)
            message += ', {} times faster than real time.'.format(speedup)
        else:
            message += '.'
            
        self._logger.info(message)
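For a concrete sense of the speedup arithmetic in Example No. 4: with hypothetical numbers of 2 detectors run on 2-channel audio covering a 3600-second interval in 60 seconds of processing time, the message would report running 240 times faster than real time.

# Hypothetical numbers, for illustration only.
num_detectors = 2
num_channels = 2
interval_duration = 3600    # seconds of audio per channel
processing_time = 60        # seconds of wall-clock processing time

total_duration = num_detectors * num_channels * interval_duration
speedup = total_duration / processing_time
print(speedup)    # 240.0, i.e. 240 times faster than real time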
Example No. 5
    def execute(self, job_info):

        classifier = self._create_classifier(job_info.job_id)

        classifier.begin_annotations()

        value_tuples = self._create_clip_query_values_iterator()

        for detector, station, mic_output, date in value_tuples:

            clips = _get_clips(station, mic_output, detector, date)

            count = clips.count()
            count_text = text_utils.create_count_text(count, 'clip')

            _logger.info(
                ('Classifier will visit {} for detector "{}", station "{}", '
                 'mic output "{}", and date {}.').format(
                     count_text, detector.name, station.name, mic_output.name,
                     date))

            try:
                _classify_clips(clips, classifier)

            except Exception:
                _logger.error(
                    'Clip classification failed. See below for exception '
                    'traceback.')
                raise

        classifier.end_annotations()

        return True
Example No. 6
    def execute(self, job_info):

        classifier = self._create_classifier(job_info.job_id)

        classifier.begin_annotations()

        value_tuples = self._create_clip_query_values_iterator()

        tag_name = model_utils.get_clip_query_tag_name(self._tag_name)

        for station, mic_output, date, detector in value_tuples:

            clips = _get_clips(station, mic_output, date, detector, tag_name)

            count = clips.count()
            count_text = text_utils.create_count_text(count, 'clip')

            _logger.info(f'Classifier will visit {count_text} for station '
                         f'"{station.name}", mic output "{mic_output.name}", '
                         f'date {date}, and detector "{detector.name}".')

            try:
                _classify_clips(clips, classifier)

            except Exception:
                _logger.error(
                    'Clip classification failed. See below for exception '
                    'traceback.')
                raise

        classifier.end_annotations()

        return True
Example No. 7
    def _adjust_clips(self):

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_adjusted_count = 0
        total_count = 0

        for detector, station, mic_output, date in value_tuples:

            clips = model_utils.get_clips(station,
                                          mic_output,
                                          detector,
                                          date,
                                          self._query_annotation_name,
                                          self._query_annotation_value,
                                          order=False)

            adjusted_count = 0
            count = 0

            for clip in clips:

                if self._adjust_clip(clip):
                    adjusted_count += 1
                    total_adjusted_count += 1

                count += 1
                total_count += 1

            # Log clip count for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(count, 'clip')
            _logger.info(
                ('Adjusted {} of {} for detector "{}", station "{}", mic '
                 'output "{}", and date {}.').format(adjusted_count,
                                                     count_text, detector.name,
                                                     station.name,
                                                     mic_output.name, date))

        # Log total clips and processing rate.
        count_text = text_utils.create_count_text(total_count, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(elapsed_time, total_count,
                                                    'clips')
        _logger.info('Adjusted {} of a total of {}{}.'.format(
            total_adjusted_count, count_text, timing_text))
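The totals line above appends timing_text directly after count_text with no separator, so command_utils.get_timing_text evidently returns a fragment that carries its own leading text. Its real implementation is not shown on this page; a minimal sketch of the assumed behavior, including a simple processing-rate figure:

def get_timing_text(elapsed_time, item_count, name):
    # Assumed behavior: a fragment that callers can append directly to a count,
    # yielding e.g. 'Adjusted 3 of a total of 120 clips in 3.0 seconds
    # (40.0 clips per second).'
    # name is a plural noun such as 'clips' or 'files', as in the calls above.
    text = f' in {elapsed_time:.1f} seconds'
    if elapsed_time != 0:
        rate = item_count / elapsed_time
        text += f' ({rate:.1f} {name} per second)'
    return text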
Example No. 8
    def _delete_clip_audio_files(self):

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_num_clips = 0
        total_num_deleted_files = 0

        for station, mic_output, date, detector in value_tuples:

            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=self._annotation_name,
                annotation_value=self._annotation_value,
                tag_name=self._tag_name,
                order=False)

            num_clips = len(clips)
            num_deleted_files = 0

            for clip in clips:
                if self._delete_clip_audio_file_if_needed(clip):
                    num_deleted_files += 1

            # Log file deletions for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(num_clips, 'clip')
            _logger.info(
                f'Deleted audio files for {num_deleted_files} of '
                f'{count_text} for station "{station.name}", '
                f'mic output "{mic_output.name}", date {date}, '
                f'and detector "{detector.name}".')

            total_num_clips += num_clips
            total_num_deleted_files += num_deleted_files

        # Log total file deletions and deletion rate.
        count_text = text_utils.create_count_text(total_num_clips, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, total_num_clips, 'clips')
        _logger.info(f'Processed a total of {count_text}{timing_text}.')
Example No. 9
    def _get_recording_lists(self):
        
        try:
            
            # Get iterator for all recordings of specified station-nights.
            recordings = itertools.chain.from_iterable(
                self._get_station_recordings(
                    name, self._start_date, self._end_date)
                for name in self._station_names)
            
            # Get mapping from station-nights to recording lists.
            recording_lists = defaultdict(list)
            for recording in recordings:
                station = recording.station
                night = station.get_night(recording.start_time)
                recording_lists[(station.name, night)].append(recording)
            
            total_num_station_nights = len(recording_lists)
            station_nights_text = text_utils.create_count_text(
                total_num_station_nights, 'station-night')
            
            if self._process_random_station_nights:
                # will process recordings for randomly selected subset
                # of station-nights
                
                recording_lists = self._select_recording_lists(recording_lists)
                
                start_num = self._start_station_night_index + 1
                end_num = self._end_station_night_index
                
                self._logger.info((
                    'This command will process recordings for '
                    'station-nights {} to {} of a shuffled sequence '
                    'of {}.').format(start_num, end_num, station_nights_text))
                
            else:
                # will process recordings for all station-nights
                
                self._logger.info(
                    'This command will process recordings for {}.'.format(
                        station_nights_text))
                    
            # Sort recordings for each station-night by start time.
            for recordings in recording_lists.values():
                recordings.sort(key=lambda r: r.start_time)

            return recording_lists
        
        except Exception as e:
            self._logger.error((
                'Collection of recordings to process failed with '
                'an exception.\n'
                'The exception message was:\n'
                '    {}\n'
                'The archive was not modified.\n'
                'See below for exception traceback.').format(str(e)))
            raise
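The grouping step above (and in the nearly identical Example No. 10 below) is ordinary defaultdict bucketing keyed on (station name, night). A self-contained illustration, with a hypothetical stand-in for the real Recording model object:

from collections import defaultdict, namedtuple

# Hypothetical stand-in for the real Recording model object.
Recording = namedtuple('Recording', ('station_name', 'night', 'start_time'))

recordings = [
    Recording('Ridge', '2020-05-02', 2),
    Recording('Ridge', '2020-05-02', 1),
    Recording('Baldy', '2020-05-01', 3),
]

# Map each station-night to the list of its recordings.
recording_lists = defaultdict(list)
for r in recordings:
    recording_lists[(r.station_name, r.night)].append(r)

# Sort each station-night's recordings by start time, as the method above does.
for rs in recording_lists.values():
    rs.sort(key=lambda r: r.start_time)

print(len(recording_lists))    # 2 station-nights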
Example No. 10
    def _get_recording_lists(self):

        try:

            # Get iterator for all recordings of specified station-nights.
            recordings = itertools.chain.from_iterable(
                self._get_station_recordings(name, self._start_date,
                                             self._end_date)
                for name in self._station_names)

            # Get mapping from station-nights to recording lists.
            recording_lists = defaultdict(list)
            for recording in recordings:
                station = recording.station
                night = station.get_night(recording.start_time)
                recording_lists[(station.name, night)].append(recording)

            total_num_station_nights = len(recording_lists)
            station_nights_text = text_utils.create_count_text(
                total_num_station_nights, 'station-night')

            if self._process_random_station_nights:
                # will process recordings for randomly selected subset
                # of station-nights

                recording_lists = self._select_recording_lists(recording_lists)

                start_num = self._start_station_night_index + 1
                end_num = self._end_station_night_index

                self._logger.info(
                    ('This command will process recordings for '
                     'station-nights {} to {} of a shuffled sequence '
                     'of {}.').format(start_num, end_num, station_nights_text))

            else:
                # will process recordings for all station-nights

                self._logger.info(
                    'This command will process recordings for {}.'.format(
                        station_nights_text))

            # Sort recordings for each station-night by start time.
            for recordings in recording_lists.values():
                recordings.sort(key=lambda r: r.start_time)

            return recording_lists

        except Exception as e:
            self._logger.error(
                ('Collection of recordings to process failed with '
                 'an exception.\n'
                 'The exception message was:\n'
                 '    {}\n'
                 'The archive was not modified.\n'
                 'See below for exception traceback.').format(str(e)))
            raise
Example No. 11
    def _log_clips_not_found(self, num_clips):

        indices_text = 'index' if num_clips == 1 else 'indices'

        count_text = text_utils.create_count_text(num_clips, 'clip')

        self._logger.info(
            f'    Could not find start {indices_text} of {count_text} '
            f'in recording channel.')
Example No. 12
    def _create_clip_audio_files(self):

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_num_clips = 0
        total_num_created_files = 0

        for detector, station, mic_output, date in value_tuples:

            clips = model_utils.get_clips(station,
                                          mic_output,
                                          detector,
                                          date,
                                          self._annotation_name,
                                          self._annotation_value,
                                          order=False)

            num_clips = len(clips)
            num_created_files = 0

            for clip in clips:
                if self._create_clip_audio_file_if_needed(clip):
                    num_created_files += 1

            # Log file creations for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(num_clips, 'clip')
            _logger.info(
                ('Created audio files for {} of {} for detector "{}", '
                 'station "{}", mic output "{}", and date {}.').format(
                     num_created_files, count_text, detector.name,
                     station.name, mic_output.name, date))

            total_num_clips += num_clips
            total_num_created_files += num_created_files

        # Log total file creations and creation rate.
        count_text = text_utils.create_count_text(total_num_clips, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(elapsed_time,
                                                    total_num_clips, 'clips')
        _logger.info('Processed a total of {}{}.'.format(
            count_text, timing_text))
Example No. 13
    def _adjust_clips(self):
        
        start_time = time.time()
        
        value_tuples = self._create_clip_query_values_iterator()
        
        total_adjusted_count = 0
        total_count = 0
        
        for detector, station, mic_output, date in value_tuples:
            
            clips = model_utils.get_clips(
                station, mic_output, detector, date,
                self._query_annotation_name, self._query_annotation_value,
                order=False)
            
            adjusted_count = 0
            count = 0
            
            for clip in clips:
                
                if self._adjust_clip(clip):
                    adjusted_count += 1
                    total_adjusted_count += 1
                    
                count += 1
                total_count += 1
                
            # Log clip count for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(count, 'clip')
            _logger.info((
                'Adjusted {} of {} for detector "{}", station "{}", mic '
                'output "{}", and date {}.').format(
                    adjusted_count, count_text, detector.name, station.name,
                    mic_output.name, date))

        # Log total clips and processing rate.
        count_text = text_utils.create_count_text(total_count, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, total_count, 'clips')
        _logger.info('Adjusted {} of a total of {}{}.'.format(
            total_adjusted_count, count_text, timing_text))
Example No. 14
    def complete_processing(self, threshold=None):

        # Create remaining clips.
        self._create_clips(threshold)

        clips_text = text_utils.create_count_text(self._num_clips, 'clip')

        if self._defer_clip_creation:

            self._write_deferred_clips_file()

            self._logger.info((
                '        Processed {} from detector "{}".').format(
                    clips_text, self._detector_model.name))

        elif self._num_database_failures == 0 and self._num_file_failures == 0:

            self._logger.info((
                '        Created {} from detector "{}".').format(
                    clips_text, self._detector_model.name))

        else:

            db_failures_text = text_utils.create_count_text(
                self._num_database_failures, 'clip creation failure')

            if self._create_clip_files:

                num_file_failures = \
                    self._num_database_failures + self._num_file_failures

                file_failures_text = ' and ' + text_utils.create_count_text(
                    num_file_failures, 'audio file creation failure')

            else:

                file_failures_text = ''

            self._logger.info(
                '        Processed {} from detector "{}" with {}{}.'.format(
                    clips_text, self._detector_model.name,
                    db_failures_text, file_failures_text))
Example No. 15
    def complete_processing(self, threshold=None):

        # Create remaining clips.
        self._create_clips(threshold)

        clips_text = text_utils.create_count_text(self._num_clips, 'clip')

        if self._defer_clip_creation:

            self._write_deferred_clips_file()

            self._logger.info(
                ('        Processed {} from detector "{}".').format(
                    clips_text, self._detector_model.name))

        elif self._num_database_failures == 0 and self._num_file_failures == 0:

            self._logger.info(
                ('        Created {} from detector "{}".').format(
                    clips_text, self._detector_model.name))

        else:

            db_failures_text = text_utils.create_count_text(
                self._num_database_failures, 'clip creation failure')

            if self._create_clip_files:

                num_file_failures = \
                    self._num_database_failures + self._num_file_failures

                file_failures_text = ' and ' + text_utils.create_count_text(
                    num_file_failures, 'audio file creation failure')

            else:

                file_failures_text = ''

            self._logger.info(
                '        Processed {} from detector "{}" with {}{}.'.format(
                    clips_text, self._detector_model.name, db_failures_text,
                    file_failures_text))
Example No. 16
    def _create_clip_audio_files(self):

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_num_clips = 0
        total_num_created_files = 0

        for detector, station, mic_output, date in value_tuples:

            clips = model_utils.get_clips(
                station, mic_output, detector, date, self._annotation_name,
                self._annotation_value, order=False)

            num_clips = len(clips)
            num_created_files = 0

            for clip in clips:
                if self._create_clip_audio_file_if_needed(clip):
                    num_created_files += 1

            # Log file creations for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(num_clips, 'clip')
            _logger.info((
                'Created audio files for {} of {} for detector "{}", '
                'station "{}", mic output "{}", and date {}.').format(
                    num_created_files, count_text, detector.name,
                    station.name, mic_output.name, date))

            total_num_clips += num_clips
            total_num_created_files += num_created_files

        # Log total file creations and creation rate.
        count_text = text_utils.create_count_text(total_num_clips, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, total_num_clips, 'clips')
        _logger.info(
            'Processed a total of {}{}.'.format(count_text, timing_text))
Example No. 17
    def _refresh_recording_file_paths(self, recording_file_paths):

        start_time = time.time()

        file_count = RecordingFile.objects.count()
        count_text = text_utils.create_count_text(file_count, 'recording file')

        self._logger.info('Command will visit {}.'.format(count_text))

        updated_count = 0

        for i, file_ in enumerate(RecordingFile.objects.all()):

            visited_count = i + 1

            if visited_count % _LOGGING_PERIOD == 0:
                self._logger.info(
                    '    Visited {} files...'.format(visited_count))

            old_path = file_.path
            file_name = _get_file_name(old_path)

            new_path = recording_file_paths.get(file_name)

            if new_path is None:
                self._logger.warning(
                    ('    Could not find recording file "{}" in recording '
                     'directories.').format(file_name))

            elif new_path != old_path:
                # self._logger.info(
                #     '        Update "{}" to "{}"...'.format(
                #         old_path, new_path))
                file_.path = new_path
                file_.save()
                updated_count += 1

        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(elapsed_time,
                                                    visited_count, 'files')

        self._logger.info(
            ('Updated paths for {} of {} visited recording files{}.').format(
                updated_count, visited_count, timing_text))
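Example No. 17 relies on two module-level names that are not shown in the snippet: _LOGGING_PERIOD and _get_file_name. Purely illustrative definitions, with the period value being an assumption:

import os.path

_LOGGING_PERIOD = 500    # assumed value: log progress every 500 files

def _get_file_name(path):
    # Presumably just the final component of the recording file path.
    return os.path.basename(path)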
Example No. 18
    def _log_detection_performance(self, num_detectors, num_channels,
                                   interval_duration, processing_time):

        format_ = text_utils.format_number

        dur = format_(interval_duration)
        time = format_(processing_time)

        detectors_text = text_utils.create_count_text(num_detectors,
                                                      'detector')

        message = ('        Ran {} on {} seconds of {}-channel '
                   'audio in {} seconds').format(detectors_text, dur,
                                                 num_channels, time)

        if processing_time != 0:
            total_duration = num_detectors * num_channels * interval_duration
            speedup = format_(total_duration / processing_time)
            message += ', {} times faster than real time.'.format(speedup)
        else:
            message += '.'

        self._logger.info(message)
Example No. 19
    def _delete_clips(self, retain_indices):
        
        start_time = time.time()
        
        retaining_clips = len(retain_indices) == 0
        
        value_tuples = self._create_clip_query_values_iterator()
        
        index = 0
        total_retained_count = 0
        
        for station, mic_output, date, detector in value_tuples:
            
            # Get clips for this station, mic_output, date, and detector
            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=self._annotation_name,
                annotation_value=self._annotation_value,
                tag_name=self._tag_name,
                order=False)
            
            
            # Figure out which clips should be deleted.
            
            count = 0
            retained_count = 0
            clips_to_delete = []
            
            for clip in clips:
                
                if index not in retain_indices:
                    clips_to_delete.append(clip)
                else:
                    retained_count += 1
                    
                count += 1
                index += 1
                
                
            # Delete clips.
            try:
                self._delete_clip_batch(clips_to_delete)
            except Exception as e:
                batch_text = \
                    _get_batch_text(station, mic_output, date, detector)
                command_utils.log_and_reraise_fatal_exception(
                    e, f'Deletion of clips for {batch_text}')

            # Log deletions.
            if retaining_clips:
                prefix = 'Deleted'
            else:
                deleted_count = count - retained_count
                prefix = (
                    f'Deleted {deleted_count} and retained '
                    f'{retained_count} of')
            count_text = text_utils.create_count_text(count, 'clip')
            batch_text = _get_batch_text(station, mic_output, date, detector)
            _logger.info(f'{prefix} {count_text} for {batch_text}.')

            total_retained_count += retained_count
                
        # Log total deletions and deletion rate.
        if total_retained_count == 0:
            prefix = 'Deleted'
        else:
            deleted_count = index - total_retained_count
            prefix = (
                f'Deleted {deleted_count} and retained '
                f'{total_retained_count} of')
        count_text = text_utils.create_count_text(index, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, index, 'clips')
        _logger.info(f'{prefix} a total of {count_text}{timing_text}.')
Example No. 20
    def _delete_clips(self, retain_indices):
        
        start_time = time.time()
        
        retaining_clips = len(retain_indices) == 0
        
        value_tuples = self._create_clip_query_values_iterator()
        
        index = 0
        total_retained_count = 0
        
        for detector, station, mic_output, date in value_tuples:
            
            # Get clips for this detector, station, mic_output, and date
            clips = model_utils.get_clips(
                station, mic_output, detector, date, self._annotation_name,
                self._annotation_value, order=False)
            
            
            # Figure out which clips should be deleted.
            
            count = 0
            retained_count = 0
            clips_to_delete = []
            
            for clip in clips:
                
                if index not in retain_indices:
                    clips_to_delete.append(clip)
                else:
                    retained_count += 1
                    
                count += 1
                index += 1
                
                
            # Delete clips.
            try:
                self._delete_clip_batch(clips_to_delete)
            except Exception as e:
                batch_text = \
                    _get_batch_text(detector, station, mic_output, date)
                command_utils.log_and_reraise_fatal_exception(
                    e, 'Deletion of clips for {}'.format(batch_text))

            # Log deletions.
            if retaining_clips:
                prefix = 'Deleted'
            else:
                deleted_count = count - retained_count
                prefix = 'Deleted {} and retained {} of'.format(
                    deleted_count, retained_count)
            count_text = text_utils.create_count_text(count, 'clip')
            batch_text = _get_batch_text(detector, station, mic_output, date)
            _logger.info(
                '{} {} for {}.'.format(prefix, count_text, batch_text))

            total_retained_count += retained_count
                
        # Log total deletions and deletion rate.
        if total_retained_count == 0:
            prefix = 'Deleted'
        else:
            deleted_count = index - total_retained_count
            prefix = 'Deleted {} and retained {} of'.format(
                deleted_count, total_retained_count)
        count_text = text_utils.create_count_text(index, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, index, 'clips')
        _logger.info('{} a total of {}{}.'.format(
            prefix, count_text, timing_text))
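Both versions of _delete_clips defer their per-batch phrasing to a _get_batch_text helper that is not shown here. Example No. 19 passes (station, mic_output, date, detector) while Example No. 20 passes (detector, station, mic_output, date), so the two snippets evidently come from different versions of that helper. A sketch matching Example No. 19's call, with wording assumed to mirror the other log messages on this page:

def _get_batch_text(station, mic_output, date, detector):
    # Assumed wording, modeled on the f-string messages above.
    return (
        f'station "{station.name}", mic output "{mic_output.name}", '
        f'date {date}, and detector "{detector.name}"')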