def _transfer_classifications_aux(self, station, mic_output, date):

        """
        Transfer classifications from source-detector clips to matching
        target-detector clips for one station / mic output / date.
        """

        # Source clips: clips of the source detector carrying the
        # configured annotation name and value.
        source_clips = model_utils.get_clips(
            station=station,
            mic_output=mic_output,
            date=date,
            detector=self._source_detector,
            annotation_name=self._annotation_name,
            annotation_value=self._annotation_value)

        # Target clips: clips of the target detector, filtered by
        # annotation name only. NOTE(review): whether this selects clips
        # that have or lack the annotation depends on `get_clips`
        # semantics not visible here — confirm.
        target_clips = model_utils.get_clips(
            station=station,
            mic_output=mic_output,
            date=date,
            detector=self._target_detector,
            annotation_name=self._annotation_name)

        matches = self._match_clips_with_calls(source_clips, target_clips)

        # Log source clip, target clip, and match counts for this batch.
        _logger.info(
            f'{self._source_detector.name} -> {self._target_detector.name} / '
            f'{station.name} / {mic_output.name} / {date} / '
            f'{source_clips.count()}  {target_clips.count()} {len(matches)}')

        if len(matches) > 0:

            # Materialize the querysets so the positional indices in
            # `matches` can be used.
            source_clips = list(source_clips.all())
            target_clips = list(target_clips.all())

            # self._show_matches(matches, source_clips, target_clips)

            for i, j in matches:
                self._transfer_classification(source_clips[i], target_clips[j])
 def _transfer_classifications_aux(self, station, mic_output, date):
     
     """
     Transfer classifications from matched source-detector clips to
     target-detector clips for one station / mic output / date.
     """
     
     source_clips = model_utils.get_clips(
         station, mic_output, self._source_detector, date,
         self._annotation_name, self._annotation_value)
     
     target_clips = model_utils.get_clips(
         station, mic_output, self._target_detector, date,
         self._annotation_name, None)
     
     matches = self._match_clips_with_calls(source_clips, target_clips)
     
     _logger.info('{} -> {} / {} / {} / {} / {}  {} {}'.format(
         self._source_detector.name, self._target_detector.name,
         station.name, mic_output.name, date, source_clips.count(),
         target_clips.count(), len(matches)))
     
     if matches:
         
         # Materialize the querysets so match indices can be applied.
         sources = list(source_clips.all())
         targets = list(target_clips.all())
         
         for source_index, target_index in matches:
             self._transfer_classification(
                 sources[source_index], targets[target_index])
 def _transfer_classifications_aux(self, station, mic_output, date):
     
     """
     For one station / mic output / date, copy classifications from
     source-detector clips onto the target-detector clips they match.
     """
     
     get_clips = model_utils.get_clips
     
     source_clips = get_clips(
         station, mic_output, self._source_detector, date,
         self._annotation_name, self._annotation_value)
     
     target_clips = get_clips(
         station, mic_output, self._target_detector, date,
         self._annotation_name, None)
     
     matches = self._match_clips_with_calls(source_clips, target_clips)
     
     _logger.info('{} -> {} / {} / {} / {} / {}  {} {}'.format(
         self._source_detector.name, self._target_detector.name,
         station.name, mic_output.name, date, source_clips.count(),
         target_clips.count(), len(matches)))
     
     if len(matches) != 0:
         
         # List the querysets so positional match indices apply.
         source_list = list(source_clips.all())
         target_list = list(target_clips.all())
         
         for i, j in matches:
             self._transfer_classification(source_list[i], target_list[j])
# Beispiel #4 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
def _get_bird_counts(station, mic_output, detector, start_date, end_date,
                     annotation_name, annotation_values,
                     count_suppression_interval):

    """
    Return estimated bird counts by (date, annotation value).

    Only annotation values for which `_is_call_type` is true are
    considered. For each such value and each date in the range, the
    matching clips' start times are sorted and handed to
    `clip_count_utils.get_bird_count` along with the count suppression
    interval.
    """

    counts = {}

    # Restrict to call-type annotation values up front.
    call_values = [v for v in annotation_values if _is_call_type(v)]

    for value in call_values:

        for date in _date_range(start_date, end_date):

            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=annotation_name,
                annotation_value=value)

            start_times = sorted(clip.start_time for clip in clips)

            counts[(date, value)] = clip_count_utils.get_bird_count(
                start_times, count_suppression_interval)

    return counts
# Beispiel #5 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
def _get_clips(station, mic_output, date, detector, tag_name):

    """
    Query clips for the specified station, mic output, date, detector,
    and tag, logging and re-raising any error as fatal.
    """

    try:
        clips = model_utils.get_clips(
            station=station,
            mic_output=mic_output,
            date=date,
            detector=detector,
            tag_name=tag_name)
    except Exception as e:
        command_utils.log_and_reraise_fatal_exception(e, 'Clip query')
    else:
        return clips
 def _count_clips(self):
     
     """Return the total number of clips matched by the clip query."""
     
     value_tuples = self._create_clip_query_values_iterator()
     
     # Sum per-batch counts without materializing any clips.
     return sum(
         model_utils.get_clips(
             station, mic_output, detector, date, self._annotation_name,
             self._annotation_value, order=False).count()
         for detector, station, mic_output, date in value_tuples)
    def _adjust_clips(self):

        """
        Adjust every clip matched by the clip query, logging per-batch
        and overall counts with the processing rate.

        A clip counts as adjusted when `self._adjust_clip(clip)` returns
        a true value.
        """

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_adjusted_count = 0
        total_count = 0

        for detector, station, mic_output, date in value_tuples:

            # Get this batch's clips. Ordering is skipped since the
            # clips are only iterated.
            clips = model_utils.get_clips(station,
                                          mic_output,
                                          detector,
                                          date,
                                          self._query_annotation_name,
                                          self._query_annotation_value,
                                          order=False)

            adjusted_count = 0
            count = 0

            for clip in clips:

                if self._adjust_clip(clip):
                    adjusted_count += 1
                    total_adjusted_count += 1

                count += 1
                total_count += 1

            # Log clip count for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(count, 'clip')
            _logger.info(
                ('Adjusted {} of {} for detector "{}", station "{}", mic '
                 'output "{}", and date {}.').format(adjusted_count,
                                                     count_text, detector.name,
                                                     station.name,
                                                     mic_output.name, date))

        # Log total clips and processing rate.
        count_text = text_utils.create_count_text(total_count, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(elapsed_time, total_count,
                                                    'clips')
        _logger.info('Adjusted {} of a total of {}{}.'.format(
            total_adjusted_count, count_text, timing_text))
 def _delete_clip_audio_files(self):
     
     """
     Delete audio files of clips matched by the clip query, logging
     per-batch deletion counts and an overall count with timing.
     
     A clip's file is deleted only when
     `self._delete_clip_audio_file_if_needed` decides it should be
     (indicated by a true return value).
     """
     
     start_time = time.time()
     
     value_tuples = self._create_clip_query_values_iterator()
     
     total_num_clips = 0
     total_num_deleted_files = 0
     
     for station, mic_output, date, detector in value_tuples:
         
         clips = model_utils.get_clips(
             station=station,
             mic_output=mic_output,
             date=date,
             detector=detector,
             annotation_name=self._annotation_name,
             annotation_value=self._annotation_value,
             tag_name=self._tag_name,
             order=False)
         
         num_clips = len(clips)
         num_deleted_files = 0
         
         for clip in clips:
             if self._delete_clip_audio_file_if_needed(clip):
                 num_deleted_files += 1
             
         # Log file deletions for this detector/station/mic_output/date.
         count_text = text_utils.create_count_text(num_clips, 'clip')
         _logger.info(
             f'Deleted audio files for {num_deleted_files} of '
             f'{count_text} for station "{station.name}", '
             f'mic output "{mic_output.name}", date {date}, '
             f'and detector "{detector.name}".')
             
         total_num_clips += num_clips
         total_num_deleted_files += num_deleted_files
         
     # Log total file deletions and deletion rate.
     count_text = text_utils.create_count_text(total_num_clips, 'clip')
     elapsed_time = time.time() - start_time
     timing_text = command_utils.get_timing_text(
         elapsed_time, total_num_clips, 'clips')
     _logger.info(f'Processed a total of {count_text}{timing_text}.')
# Beispiel #9 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
    def _create_clip_audio_files(self):

        """
        Create audio files for clips matched by the clip query, logging
        per-batch creation counts and an overall count with timing.

        A file is created for a clip only when
        `self._create_clip_audio_file_if_needed` decides one is needed
        (indicated by a true return value).
        """

        start_time = time.time()

        value_tuples = self._create_clip_query_values_iterator()

        total_num_clips = 0
        total_num_created_files = 0

        for detector, station, mic_output, date in value_tuples:

            clips = model_utils.get_clips(station,
                                          mic_output,
                                          detector,
                                          date,
                                          self._annotation_name,
                                          self._annotation_value,
                                          order=False)

            num_clips = len(clips)
            num_created_files = 0

            for clip in clips:
                if self._create_clip_audio_file_if_needed(clip):
                    num_created_files += 1

            # Log file creations for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(num_clips, 'clip')
            _logger.info(
                ('Created audio files for {} of {} for detector "{}", '
                 'station "{}", mic output "{}", and date {}.').format(
                     num_created_files, count_text, detector.name,
                     station.name, mic_output.name, date))

            total_num_clips += num_clips
            total_num_created_files += num_created_files

        # Log total file creations and creation rate.
        count_text = text_utils.create_count_text(total_num_clips, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(elapsed_time,
                                                    total_num_clips, 'clips')
        _logger.info('Processed a total of {}{}.'.format(
            count_text, timing_text))
    def _adjust_clips(self):
        
        """
        Adjust the clips matched by the clip query.

        For each (detector, station, mic output, date) batch, fetches
        the matching clips, calls `self._adjust_clip` on each (a true
        return value counts as an adjustment), and logs how many were
        adjusted; finishes with total counts and processing rate.
        """
        
        start_time = time.time()
        
        value_tuples = self._create_clip_query_values_iterator()
        
        total_adjusted_count = 0
        total_count = 0
        
        for detector, station, mic_output, date in value_tuples:
            
            clips = model_utils.get_clips(
                station, mic_output, detector, date,
                self._query_annotation_name, self._query_annotation_value,
                order=False)
            
            adjusted_count = 0
            count = 0
            
            for clip in clips:
                
                if self._adjust_clip(clip):
                    adjusted_count += 1
                    total_adjusted_count += 1
                    
                count += 1
                total_count += 1
                
            # Log clip count for this detector/station/mic_output/date.
            count_text = text_utils.create_count_text(count, 'clip')
            _logger.info((
                'Adjusted {} of {} for detector "{}", station "{}", mic '
                'output "{}", and date {}.').format(
                    adjusted_count, count_text, detector.name, station.name,
                    mic_output.name, date))

        # Log total clips and processing rate.
        count_text = text_utils.create_count_text(total_count, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, total_count, 'clips')
        _logger.info('Adjusted {} of a total of {}{}.'.format(
            total_adjusted_count, count_text, timing_text))
 def _create_clip_audio_files(self):
     
     """
     Create audio files for the clips matched by the clip query.
     
     For each (detector, station, mic output, date) batch, a file is
     created for a clip only when
     `self._create_clip_audio_file_if_needed` returns a true value.
     Per-batch and total creation counts are logged, with timing.
     """
     
     start_time = time.time()
     
     value_tuples = self._create_clip_query_values_iterator()
     
     total_num_clips = 0
     total_num_created_files = 0
     
     for detector, station, mic_output, date in value_tuples:
         
         clips = model_utils.get_clips(
             station, mic_output, detector, date, self._annotation_name,
             self._annotation_value, order=False)
         
         num_clips = len(clips)
         num_created_files = 0
         
         for clip in clips:
             if self._create_clip_audio_file_if_needed(clip):
                 num_created_files += 1
             
         # Log file creations for this detector/station/mic_output/date.
         count_text = text_utils.create_count_text(num_clips, 'clip')
         _logger.info((
             'Created audio files for {} of {} for detector "{}", '
             'station "{}", mic output "{}", and date {}.').format(
                 num_created_files, count_text, detector.name,
                 station.name, mic_output.name, date))
             
         total_num_clips += num_clips
         total_num_created_files += num_created_files
         
     # Log total file creations and creation rate.
     count_text = text_utils.create_count_text(total_num_clips, 'clip')
     elapsed_time = time.time() - start_time
     timing_text = command_utils.get_timing_text(
         elapsed_time, total_num_clips, 'clips')
     _logger.info(
         'Processed a total of {}{}.'.format(count_text, timing_text))
def annotate_old_bird_calls():
    
    """
    Match Old Bird detector clips to ground truth calls and, when the
    module-level `ANNOTATE` flag is true, annotate the matched clips.

    Returns a list of rows of the form [short detector name, station
    number, ground truth call count, matched call count, Old Bird clip
    count], one row per (detector, station) combination.
    """
    
    center_index_annotation_info = \
        AnnotationInfo.objects.get(name=CENTER_INDEX_ANNOTATION_NAME)
    center_freq_annotation_info = \
        AnnotationInfo.objects.get(name=CENTER_FREQ_ANNOTATION_NAME)
    classification_annotation_info = \
        AnnotationInfo.objects.get(name=CLASSIFICATION_ANNOTATION_NAME)
        
    # NOTE(review): the username appears redacted ('******') in this
    # copy of the source — confirm the intended value.
    user = User.objects.get(username='******')
        
    sm_pairs = model_utils.get_station_mic_output_pairs_list()
    
    ground_truth_detector = Processor.objects.get(
        name=GROUND_TRUTH_DETECTOR_NAME)
    
    rows = []
    
    for detector_name, annotation_value in DETECTOR_DATA:
        
        # The detector's short name is the third token of its full name.
        short_detector_name = detector_name.split()[2]
        old_bird_detector = Processor.objects.get(name=detector_name)
        window = utils.OLD_BIRD_CLIP_CALL_CENTER_WINDOWS[short_detector_name]
                 
        for station, mic_output in sm_pairs:
 
            # Assumes station names of the form '<word> <number>'.
            station_num = int(station.name.split()[1])
            
            print('{} {}...'.format(short_detector_name, station_num))
            
            # Ground truth clips: ground truth detector clips classified
            # with this detector's annotation value.
            ground_truth_clips = list(model_utils.get_clips(
                station, mic_output, ground_truth_detector, None,
                CLASSIFICATION_ANNOTATION_NAME, annotation_value))
            
            # Sample index of each ground truth clip's center.
            ground_truth_call_center_indices = \
                [c.start_index + c.length // 2 for c in ground_truth_clips]
                          
            ground_truth_call_count = len(ground_truth_clips)

            old_bird_clips = list(model_utils.get_clips(
                station, mic_output, old_bird_detector))
            
            old_bird_clip_count = len(old_bird_clips)

            # Match Old Bird clips with ground truth call centers using
            # the detector's call center window.
            clips = [(c.start_index, c.length) for c in old_bird_clips]
            matches = utils.match_clips_with_calls(
                clips, ground_truth_call_center_indices, window)
            
            old_bird_call_count = len(matches)
            
            rows.append([
                short_detector_name, station_num, ground_truth_call_count,
                old_bird_call_count, old_bird_clip_count])

            if ANNOTATE:
                
                # Clear any existing annotations.
                for clip in old_bird_clips:
                    model_utils.delete_clip_annotation(
                        clip, classification_annotation_info,
                        creating_user=user)
                    
                # Create new annotations.
                for i, j in matches:
                    
                    old_bird_clip = old_bird_clips[i]
                    call_center_index = ground_truth_call_center_indices[j]
                    ground_truth_clip = ground_truth_clips[j]
                    
                    # Annotate Old Bird clip call center index.
                    model_utils.annotate_clip(
                        old_bird_clip, center_index_annotation_info,
                        str(call_center_index), creating_user=user)
                    
                    # Get ground truth clip call center frequency.
                    annotations = \
                        model_utils.get_clip_annotations(ground_truth_clip)
                    call_center_freq = annotations[CENTER_FREQ_ANNOTATION_NAME]

                    # Annotate Old Bird clip call center frequency.
                    model_utils.annotate_clip(
                        old_bird_clip, center_freq_annotation_info,
                        call_center_freq, creating_user=user)
                
                    model_utils.annotate_clip(
                        old_bird_clip, classification_annotation_info,
                        annotation_value, creating_user=user)
                        
    return rows
# Beispiel #13 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
def annotate_detected_calls():

    """
    Match Old Bird detector clips to ground truth calls and, when the
    module-level `ANNOTATE` flag is true, annotate the matched clips
    with call center index, call center frequency, and classification.

    Returns a list of rows of the form [short detector name, station
    number, ground truth call count, matched call count, Old Bird clip
    count], one row per (detector, station) combination.
    """

    center_index_annotation_info = \
        AnnotationInfo.objects.get(name=CENTER_INDEX_ANNOTATION_NAME)
    center_freq_annotation_info = \
        AnnotationInfo.objects.get(name=CENTER_FREQ_ANNOTATION_NAME)
    classification_annotation_info = \
        AnnotationInfo.objects.get(name=CLASSIFICATION_ANNOTATION_NAME)

    # NOTE(review): the username appears redacted ('******') in this
    # copy of the source — confirm the intended value.
    user = User.objects.get(username='******')

    sm_pairs = model_utils.get_station_mic_output_pairs_list()

    ground_truth_detector = Processor.objects.get(
        name=GROUND_TRUTH_DETECTOR_NAME)

    rows = []

    for detector_name, annotation_value in DETECTOR_DATA:

        # The detector's short name is the third token of its full name.
        short_detector_name = detector_name.split()[2]
        old_bird_detector = Processor.objects.get(name=detector_name)
        window = utils.CALL_CENTER_WINDOWS[short_detector_name]

        for station, mic_output in sm_pairs:

            # Assumes station names of the form '<word> <number>'.
            station_num = int(station.name.split()[1])

            print('{} {}...'.format(short_detector_name, station_num))

            # Ground truth clips: ground truth detector clips classified
            # with this detector's annotation value.
            ground_truth_clips = list(
                model_utils.get_clips(station, mic_output,
                                      ground_truth_detector, None,
                                      CLASSIFICATION_ANNOTATION_NAME,
                                      annotation_value))

            # Sample index of each ground truth clip's center.
            ground_truth_call_center_indices = \
                [c.start_index + c.length // 2 for c in ground_truth_clips]

            ground_truth_call_count = len(ground_truth_clips)

            old_bird_clips = list(
                model_utils.get_clips(station, mic_output, old_bird_detector))

            old_bird_clip_count = len(old_bird_clips)

            # Match Old Bird clips with ground truth call centers using
            # the detector's call center window.
            clips = [(c.start_index, c.length) for c in old_bird_clips]
            matches = utils.match_clips_with_calls(
                clips, ground_truth_call_center_indices, window)

            old_bird_call_count = len(matches)

            rows.append([
                short_detector_name, station_num, ground_truth_call_count,
                old_bird_call_count, old_bird_clip_count
            ])

            if ANNOTATE:

                # Clear any existing annotations.
                for clip in old_bird_clips:
                    model_utils.delete_clip_annotation(
                        clip,
                        classification_annotation_info,
                        creating_user=user)

                # Create new annotations.
                for i, j in matches:

                    old_bird_clip = old_bird_clips[i]
                    call_center_index = ground_truth_call_center_indices[j]
                    ground_truth_clip = ground_truth_clips[j]

                    # Annotate Old Bird clip call center index.
                    model_utils.annotate_clip(old_bird_clip,
                                              center_index_annotation_info,
                                              str(call_center_index),
                                              creating_user=user)

                    # Get ground truth clip call center frequency.
                    annotations = \
                        model_utils.get_clip_annotations(ground_truth_clip)
                    call_center_freq = annotations[CENTER_FREQ_ANNOTATION_NAME]

                    # Annotate Old Bird clip call center frequency.
                    model_utils.annotate_clip(old_bird_clip,
                                              center_freq_annotation_info,
                                              call_center_freq,
                                              creating_user=user)

                    model_utils.annotate_clip(old_bird_clip,
                                              classification_annotation_info,
                                              annotation_value,
                                              creating_user=user)

    return rows
    def _delete_clips(self, retain_indices):
        
        """
        Delete the clips matched by the clip query, except those whose
        positional indices are in `retain_indices`.

        Clips are numbered consecutively from zero across all
        (detector, station, mic output, date) batches of the query, and
        a clip is retained when its number is in `retain_indices`.
        Per-batch and total deletion counts are logged, along with the
        overall deletion rate.
        """
        
        start_time = time.time()
        
        # Convert to a set once so the per-clip membership test below
        # is O(1) even when `retain_indices` is a large list.
        retain_indices = frozenset(retain_indices)
        
        # When no indices are retained, every clip is deleted and the
        # log messages use a simpler form.
        deleting_all_clips = len(retain_indices) == 0
        
        value_tuples = self._create_clip_query_values_iterator()
        
        index = 0
        total_retained_count = 0
        
        for detector, station, mic_output, date in value_tuples:
            
            # Get clips for this detector, station, mic_output, and date
            clips = model_utils.get_clips(
                station, mic_output, detector, date, self._annotation_name,
                self._annotation_value, order=False)
            
            
            # Figure out which clips should be deleted.
            
            count = 0
            retained_count = 0
            clips_to_delete = []
            
            for clip in clips:
                
                if index in retain_indices:
                    retained_count += 1
                else:
                    clips_to_delete.append(clip)
                    
                count += 1
                index += 1
                
                
            # Delete clips, treating any failure as fatal.
            try:
                self._delete_clip_batch(clips_to_delete)
            except Exception as e:
                batch_text = \
                    _get_batch_text(detector, station, mic_output, date)
                command_utils.log_and_reraise_fatal_exception(
                    e, 'Deletion of clips for {}'.format(batch_text))

            # Log deletions.
            if deleting_all_clips:
                prefix = 'Deleted'
            else:
                deleted_count = count - retained_count
                prefix = 'Deleted {} and retained {} of'.format(
                    deleted_count, retained_count)
            count_text = text_utils.create_count_text(count, 'clip')
            batch_text = _get_batch_text(detector, station, mic_output, date)
            _logger.info(
                '{} {} for {}.'.format(prefix, count_text, batch_text))

            total_retained_count += retained_count
                
        # Log total deletions and deletion rate.
        if total_retained_count == 0:
            prefix = 'Deleted'
        else:
            deleted_count = index - total_retained_count
            prefix = 'Deleted {} and retained {} of'.format(
                deleted_count, total_retained_count)
        count_text = text_utils.create_count_text(index, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, index, 'clips')
        _logger.info('{} a total of {}{}.'.format(
            prefix, count_text, timing_text))
# Beispiel #15 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
    def _delete_clips(self, retain_indices):
        
        """
        Delete the clips matched by the clip query, except those whose
        positional indices are in `retain_indices`.

        Clips are numbered consecutively from zero across all
        (station, mic output, date, detector) batches of the query, and
        a clip is retained when its number is in `retain_indices`.
        Per-batch and total deletion counts are logged, along with the
        overall deletion rate.
        """
        
        start_time = time.time()
        
        # Convert to a set once so the per-clip membership test below
        # is O(1) even when `retain_indices` is a large list.
        retain_indices = frozenset(retain_indices)
        
        # When no indices are retained, every clip is deleted and the
        # log messages use a simpler form.
        deleting_all_clips = len(retain_indices) == 0
        
        value_tuples = self._create_clip_query_values_iterator()
        
        index = 0
        total_retained_count = 0
        
        for station, mic_output, date, detector in value_tuples:
            
            # Get clips for this station, mic_output, date, and detector
            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=self._annotation_name,
                annotation_value=self._annotation_value,
                tag_name=self._tag_name,
                order=False)
            
            
            # Figure out which clips should be deleted.
            
            count = 0
            retained_count = 0
            clips_to_delete = []
            
            for clip in clips:
                
                if index in retain_indices:
                    retained_count += 1
                else:
                    clips_to_delete.append(clip)
                    
                count += 1
                index += 1
                
                
            # Delete clips, treating any failure as fatal.
            try:
                self._delete_clip_batch(clips_to_delete)
            except Exception as e:
                batch_text = \
                    _get_batch_text(station, mic_output, date, detector)
                command_utils.log_and_reraise_fatal_exception(
                    e, f'Deletion of clips for {batch_text}')

            # Log deletions.
            if deleting_all_clips:
                prefix = 'Deleted'
            else:
                deleted_count = count - retained_count
                prefix = (
                    f'Deleted {deleted_count} and retained '
                    f'{retained_count} of')
            count_text = text_utils.create_count_text(count, 'clip')
            batch_text = _get_batch_text(station, mic_output, date, detector)
            _logger.info(f'{prefix} {count_text} for {batch_text}.')

            total_retained_count += retained_count
                
        # Log total deletions and deletion rate.
        if total_retained_count == 0:
            prefix = 'Deleted'
        else:
            deleted_count = index - total_retained_count
            prefix = (
                f'Deleted {deleted_count} and retained '
                f'{total_retained_count} of')
        count_text = text_utils.create_count_text(index, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, index, 'clips')
        _logger.info(f'{prefix} a total of {count_text}{timing_text}.')
# Beispiel #16 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
def _get_clips(*args):

    """
    Forward `args` to `model_utils.get_clips`, logging and re-raising
    any error as fatal.
    """

    try:
        clips = model_utils.get_clips(*args)
    except Exception as e:
        command_utils.log_and_reraise_fatal_exception(e, 'Clip query')
    else:
        return clips
# Beispiel #17 (score 0) — scraped-example separator, commented out so it
# cannot execute as code.
def _get_clips(*args):
    """Thin wrapper around `model_utils.get_clips` that treats any
    exception as a fatal error (logged and re-raised)."""
    try:
        result = model_utils.get_clips(*args)
    except Exception as error:
        command_utils.log_and_reraise_fatal_exception(error, 'Clip query')
    else:
        return result