    def _delete_clip_batch(self, clips):

        with archive_lock.atomic():

            with transaction.atomic():

                # Delete clips in chunks to limit the number of clip IDs
                # we pass to `Clip.objects.filter`.

                # Setting this too large can result in a
                # django.db.utils.OperationalError exception with the
                # message "too many SQL variables". We have seen this
                # happen with a maximum chunk size of 1000 on Windows,
                # though not on macOS. The web page
                # https://stackoverflow.com/questions/7106016/
                # too-many-sql-variables-error-in-django-witih-sqlite3
                # suggests that the maximum chunk size that will work
                # on Windows is somewhere between 900 and 1000, and
                # 900 seems to work.
                max_chunk_size = 900

                for i in range(0, len(clips), max_chunk_size):

                    chunk = clips[i:i + max_chunk_size]

                    # Delete clips from archive database.
                    ids = [clip.id for clip in chunk]
                    Clip.objects.filter(id__in=ids).delete()

        # Delete clip audio files. We do this after the transaction so
        # that if the transaction fails, leaving the clips in the
        # database and raising an exception, we don't delete any clip
        # files.
        for clip in clips:
            self._clip_manager.delete_audio_file(clip)
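The chunk size above works around SQLite's limit on the number of bound parameters in a single statement (`SQLITE_MAX_VARIABLE_NUMBER`, which defaulted to 999 in older SQLite builds). A minimal sketch of the same chunking pattern using the plain `sqlite3` module, assuming a table named `clip` (the table name is illustrative, not Vesper's actual schema):

import sqlite3

def delete_rows_in_chunks(connection, ids, max_chunk_size=900):
    # Delete rows in chunks so that no single statement exceeds
    # SQLite's bound-parameter limit.
    for i in range(0, len(ids), max_chunk_size):
        chunk = ids[i:i + max_chunk_size]
        placeholders = ', '.join('?' * len(chunk))
        connection.execute(
            f'DELETE FROM clip WHERE id IN ({placeholders})', chunk)
    connection.commit()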
    def _archive_clip(self, file_path, samples, start_index):

        station = self._recording.station

        # Get clip start time as a `datetime`.
        start_seconds = start_index / self._sample_rate
        start_delta = datetime.timedelta(seconds=start_seconds)
        start_time = self._recording.start_time + start_delta

        # Get clip length in sample frames.
        length = len(samples)

        end_time = signal_utils.get_end_time(start_time, length,
                                             self._sample_rate)

        creation_time = time_utils.get_utc_now()

        try:

            with archive_lock.atomic():

                with transaction.atomic():

                    clip = Clip.objects.create(
                        station=station,
                        mic_output=self._mic_output,
                        recording_channel=self._recording_channel,
                        start_index=start_index,
                        length=length,
                        sample_rate=self._sample_rate,
                        start_time=start_time,
                        end_time=end_time,
                        date=station.get_night(start_time),
                        creation_time=creation_time,
                        creating_user=None,
                        creating_job=self._job,
                        creating_processor=self._detector)

                    # We must create the clip audio file after creating
                    # the clip row in the database. The file's path
                    # depends on the clip ID, which is set as part of
                    # creating the clip row.
                    #
                    # We create the audio file within the database
                    # transaction to ensure that the clip row and
                    # audio file are created atomically.
                    if self._create_clip_files:
                        self._clip_manager.create_audio_file(clip, samples)

        except Exception as e:
            self._logger.error(
                ('Attempt to create clip from file "{}" failed with message: '
                 '{}. File will be ignored.').format(file_path, str(e)))

        else:
            self._logger.info('Archived {} clip {}.'.format(self.name, clip))
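`signal_utils.get_end_time` is not shown on this page. A minimal sketch of what it presumably computes, assuming the last of `length` sample frames defines the clip's end (consistent with the `(clip.length - 1) / sample_rate` duration computation in Example #14 below; the real implementation may differ):

import datetime

def get_end_time(start_time, length, sample_rate):
    # The last frame of a `length`-frame clip occurs (length - 1)
    # sample periods after the first.
    seconds = (length - 1) / sample_rate
    return start_time + datetime.timedelta(seconds=seconds)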
Example #4
def _create_job(command_spec, user):

    with archive_lock.atomic():
        job = Job.objects.create(command=json.dumps(
            command_spec, default=_json_date_serializer),
                                 creation_time=time_utils.get_utc_now(),
                                 creating_user=user,
                                 status='Unstarted')

    return job.id
Example #5
def _create_job(command_spec, user):
    
    with archive_lock.atomic():
        job = Job.objects.create(
            command=json.dumps(command_spec, default=_json_date_serializer),
            creation_time=time_utils.get_utc_now(),
            creating_user=user,
            status='Not Started')
    
    return job.id
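Both `_create_job` variants pass a `_json_date_serializer` to `json.dumps` that is not shown on this page. A plausible minimal sketch, assuming its only job is to make `datetime` and `date` values JSON-serializable (hypothetical; the actual helper may differ):

import datetime

def _json_date_serializer(obj):
    # `json.dumps` calls this for any object it cannot serialize
    # natively; convert dates and datetimes to ISO 8601 strings.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError(
        f'Object of type {type(obj).__name__} is not JSON serializable')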
    def _adjust_clip(self, clip):

        try:
            with archive_lock.atomic():
                with transaction.atomic():
                    return self._adjust_clip_aux(clip)

        except Exception as e:
            command_utils.log_and_reraise_fatal_exception(
                e, 'Processing of clip "{}"'.format(str(clip)),
                'The clip was not modified.')
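`command_utils.log_and_reraise_fatal_exception` is referenced but not shown. A plausible sketch of its behavior, inferred from the call site above (hypothetical; the real helper may format its message differently):

import logging

def log_and_reraise_fatal_exception(exception, action_text, result_text):
    # Log a fatal error describing the failed action and its
    # consequence, then propagate the original exception.
    logging.getLogger().critical(
        f'{action_text} failed with an exception. {result_text} '
        f'Error message was: {exception}')
    raise exception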
    def _delete_recording(self, recording):

        self._logger.info('Deleting recording "{}"...'.format(str(recording)))

        with archive_lock.atomic():

            with transaction.atomic():

                # TODO: Consider moving file deletions outside of database
                # transaction.

                clips = Clip.objects.filter(
                    recording_channel__recording=recording)

                # Delete clip files.
                for clip in clips:
                    self._clip_manager.delete_audio_file(clip)

                recording.delete()
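The TODO above flags a real hazard: the audio files are deleted inside the database transaction, so if the transaction later rolls back, the clip rows survive but their files are already gone. A hypothetical variant following the safer ordering of `_delete_clip_batch` above (rows first, files only after the transaction commits):

    def _delete_recording_safer(self, recording):

        # Collect the clips before deleting anything, since the
        # cascading `recording.delete()` will remove their rows.
        clips = list(Clip.objects.filter(
            recording_channel__recording=recording))

        with archive_lock.atomic():
            with transaction.atomic():
                recording.delete()

        # Delete audio files only after the transaction has
        # committed, so a rollback cannot leave rows without files.
        for clip in clips:
            self._clip_manager.delete_audio_file(clip)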
    def _add_channel_clip_start_indices(self, channel, detector):

        recording = channel.recording
        recording_start_time = recording.start_time
        recording_length = recording.length
        sample_rate = recording.sample_rate

        create_count_text = text_utils.create_count_text

        with archive_lock.atomic():

            with transaction.atomic():

                clips = Clip.objects.filter(recording_channel=channel,
                                            creating_processor=detector,
                                            start_index=None)

                num_clips = clips.count()
                num_clips_found = 0

                if num_clips != 0:

                    count_text = create_count_text(num_clips, 'clip')

                    self._logger.info(
                        f'Processing {count_text} for recording channel '
                        f'"{str(channel)}" and detector "{detector.name}"...')

                    start_time = recording_start_time
                    duration = datetime.timedelta(seconds=recording_length /
                                                  sample_rate)
                    end_time = start_time + duration

                    # self._logger.info(
                    #     f'    Recording has start time {str(start_time)} '
                    #     f'and end time {end_time}.')

                    for clip in clips:

                        result = self._find_clip_in_recording(clip, channel)

                        if not isinstance(result, str):
                            # found clip

                            # Get result parts. Note that the clip channel
                            # can change when the clip is found, since in
                            # some cases clips were attributed to the wrong
                            # recordings when the clips were imported. In
                            # one scenario, for example, a clip that was
                            # actually toward the beginning of the second
                            # of two contiguous recordings of a night was
                            # incorrectly assigned to the end of the first
                            # recording, since according to the purported
                            # start times and sample rates of the recordings
                            # the end of the first recording overlapped
                            # the start of the second recording in time.
                            samples, found_channel, start_index = result

                            # Get clip start time.
                            start_seconds = start_index / sample_rate
                            delta = datetime.timedelta(seconds=start_seconds)
                            if found_channel == channel:
                                start_time = recording_start_time + delta
                            else:
                                start_time = \
                                    found_channel.recording.start_time + delta

                            # Get change in clip start time.
                            start_time_change = \
                                (start_time - clip.start_time).total_seconds()
                            if start_time_change < self._min_start_time_change:
                                self._min_start_time_change = start_time_change
                            if start_time_change > self._max_start_time_change:
                                self._max_start_time_change = start_time_change

                            # Get clip length. The Old Bird detectors
                            # sometimes append zeros to a clip that were
                            # not in the recording that the clip refers
                            # to. We ignore the appended zeros.
                            length = len(samples)
                            duration = signal_utils.get_duration(
                                length, sample_rate)

                            # Get clip end time.
                            end_time = signal_utils.get_end_time(
                                start_time, length, sample_rate)

                            clip.channel = found_channel
                            clip.start_index = start_index
                            clip.length = length
                            clip.start_time = start_time
                            clip.end_time = end_time

                            if not self._dry_run:
                                clip.save()

                            num_clips_found += 1

                    if num_clips_found != num_clips:
                        self._log_clips_not_found(num_clips - num_clips_found)

                return num_clips, num_clips_found
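The index-to-time arithmetic above is central to this example. A tiny worked case with illustrative numbers (the sample rate and start time are assumptions, not values from this page):

import datetime

sample_rate = 24000                       # Hz, illustrative
recording_start_time = datetime.datetime(
    2020, 9, 1, 2, 30, 0, tzinfo=datetime.timezone.utc)

start_index = 3600000                     # sample frames into recording
start_seconds = start_index / sample_rate          # 150.0 seconds
delta = datetime.timedelta(seconds=start_seconds)
start_time = recording_start_time + delta          # 02:32:30 UTC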
Example #10
    def _create_clips(self, threshold):

        if not _CREATE_CLIPS:
            return

        # TODO: Find out exactly what database queries are
        # executed during detection (ideally, record the sequence
        # of queries) to see if database interaction could be
        # made more efficient, for example with a cache.

        recording_channel = self._recording_channel
        detector_model = self._detector_model
        start_offset = self._file_start_index + self._interval_start_index
        creation_time = time_utils.get_utc_now()

        create_clip_files = self._create_clip_files

        if self._defer_clip_creation:

            for start_index, length, annotations in self._clips:
                start_index += start_offset
                clip = [
                    recording_channel.id, start_index, length, creation_time,
                    self._job.id, detector_model.id, annotations
                ]
                self._deferred_clips.append(clip)

        else:
            # database writes not deferred

            station = self._recording.station
            sample_rate = self._recording.sample_rate
            mic_output = recording_channel.mic_output

            if create_clip_files:
                clips = []

            # Create database records for current batch of clips in one
            # database transaction.

#             trans_start_time = time.time()

            try:

                with archive_lock.atomic(), transaction.atomic():

                    for start_index, length, annotations in self._clips:

                        # Get clip start time as a `datetime`.
                        start_index += start_offset
                        start_delta = datetime.timedelta(seconds=start_index /
                                                         sample_rate)
                        start_time = \
                            self._recording.start_time + start_delta

                        end_time = signal_utils.get_end_time(
                            start_time, length, sample_rate)

                        try:

                            # It would be nice to use Django's
                            # `bulk_create` here, but unfortunately that
                            # won't automatically set clip IDs for us
                            # except (as of this writing) if we're using
                            # PostgreSQL.
                            clip = Clip.objects.create(
                                station=station,
                                mic_output=mic_output,
                                recording_channel=recording_channel,
                                start_index=start_index,
                                length=length,
                                sample_rate=sample_rate,
                                start_time=start_time,
                                end_time=end_time,
                                date=station.get_night(start_time),
                                creation_time=creation_time,
                                creating_user=None,
                                creating_job=self._job,
                                creating_processor=detector_model)

                            if create_clip_files:

                                # Save clip so we can create clip file
                                # outside of transaction.
                                clips.append(clip)

                            if annotations is not None:

                                for name, value in annotations.items():

                                    annotation_info = \
                                        self._get_annotation_info(name)

                                    model_utils.annotate_clip(
                                        clip,
                                        annotation_info,
                                        str(value),
                                        creation_time=creation_time,
                                        creating_user=None,
                                        creating_job=self._job,
                                        creating_processor=detector_model)

                        except Exception as e:

                            # Note that it's important not to perform any
                            # database queries here. If the database raised
                            # the exception, we have to wait until we're
                            # outside of the transaction to query the
                            # database again.
                            raise _ClipCreationError(e)

#                     trans_end_time = time.time()
#                     self._num_transactions += 1
#                     self._total_transactions_duration += \
#                         trans_end_time - trans_start_time

            except _ClipCreationError as e:

                duration = signal_utils.get_duration(length, sample_rate)

                clip_string = Clip.get_string(station.name, mic_output.name,
                                              detector_model.name, start_time,
                                              duration)

                batch_size = len(self._clips)
                self._num_database_failures += batch_size

                if batch_size == 1:
                    prefix = 'Clip'
                else:
                    prefix = f'All {batch_size} clips in this batch'

                self._logger.error(
                    f'            Attempt to create clip {clip_string} '
                    f'failed with message: {str(e.wrapped_exception)}. '
                    f'{prefix} will be ignored.')

            else:
                # clip creation succeeded

                if create_clip_files:

                    for clip in clips:

                        try:
                            self._clip_manager.create_audio_file(clip)

                        except Exception as e:
                            self._num_file_failures += 1
                            self._logger.error(
                                ('            Attempt to create audio file '
                                 'for clip {} failed with message: {} Clip '
                                 'database record was still created.').format(
                                     str(clip), str(e)))

        self._clips = []
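Both versions of `_create_clips` on this page wrap failures in a `_ClipCreationError` whose `wrapped_exception` attribute carries the original error, but the class itself is not shown. A minimal sketch consistent with the single-argument usage in this version (Example #13 below also passes a clip string; the real class may differ):

class _ClipCreationError(Exception):

    # Wraps an exception raised while creating a clip inside a
    # database transaction, so that the handler outside the
    # transaction can report the original error.
    def __init__(self, wrapped_exception):
        super().__init__(str(wrapped_exception))
        self.wrapped_exception = wrapped_exception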
Example #11
def run_job(job_info):
    
    """
    Runs a job in a new process.
    
    This function is executed by the Vesper job manager each time it
    starts a new job. The function is executed in a new process, called
    the *main job process* of the job.
    
    The function sets up Django and configures the root logger for the
    main job process, constructs the command to be executed, and invokes
    the command's `execute` method. Logging is shut down after that method
    returns.
    
    Parameters:
    
        job_info : `Bunch`
            information pertaining to the new job.
            
            The information includes the command specification for the
            new job, the ID of the Django Job model instance for the job,
            and the stop event for the job.
            
            The information includes the ID of the Django job model
            instance rather than the instance itself so that the job
            info can be unpickled in the new process without first
            setting up Django.
            
            This object is *not* of type `vesper.command.job_info.JobInfo`,
            which contains somewhat different (though overlapping)
            information. This function invokes the `execute` method of the
            command of the new job with an argument of type
            `vesper.command.job_info.JobInfo`.
    """
    
    # Set up Django for the main job process. We must do this before we try
    # to use anything Django (e.g. the ORM) in the new process. We perform
    # the setup inside of this function rather than at the top of this module
    # so that it happens only in a new job process, and not in a parent
    # process that is importing this module merely to be able to execute
    # this function. In the latter case Django will already have been set up
    # in the importing process if it is needed, and it would be redundant
    # and potentially problematic to perform the setup again.
    django_utils.set_up_django()
    
    # These imports are here rather than at the top of this module so
    # they will be executed after Django is set up in the main job process.
    # from django.conf import settings as django_settings
    from vesper.django.app.models import Job
    import vesper.util.archive_lock as archive_lock
    
    # Set the archive lock for this process. The lock is provided to
    # this process by its creator.
    archive_lock.set_lock(job_info.archive_lock)

    # Get the Django model instance for this job.
    job = Job.objects.get(id=job_info.job_id)
    
    # Start up logging.
    # level = logging.DEBUG if django_settings.DEBUG else logging.INFO
    level = logging.INFO
    logging_manager = JobLoggingManager(job, level)
    logging_manager.start_up_logging()
    
    # Configure root logger for the main job process.
    logger = logging.getLogger()
    logging_config = logging_manager.logging_config
    JobLoggingManager.configure_logger(logger, logging_config)
    
    try:
        
        # Mark job as running.
        job.start_time = time_utils.get_utc_now()
        job.status = 'Running'
        with archive_lock.atomic():
            job.save()
        
        # Create command from command spec.
        command = _create_command(job_info.command_spec)
        
        prefix = 'Job started for command "{}"'.format(command.name)
        
        if len(command.arguments) == 0:
            logger.info('{} with no arguments.'.format(prefix))
                        
        else:
            
            logger.info('{} with arguments:'.format(prefix))
            
            # Log command arguments JSON.
            args = json.dumps(
                command.arguments, sort_keys=False, indent=4,
                cls=_CommandArgsEncoder)
            lines = args.splitlines()
            for line in lines:
                logger.info('    {}'.format(line))
    
        # Execute command.
        info = JobInfo(job_info.job_id, logging_config, job_info.stop_event)
        complete = command.execute(info)
        
    except Exception:
        
        # Update job status and log error message
        
        job.end_time = time_utils.get_utc_now()
        job.status = 'Raised Exception'
        with archive_lock.atomic():
            job.save()
        
        logger.error(
            'Job raised exception. See traceback below.\n' +
            traceback.format_exc())
        
    else:
        
        # Update job status and log final message.
        
        status = 'Complete' if complete else 'Interrupted'

        job.end_time = time_utils.get_utc_now()
        job.status = status
        with archive_lock.atomic():
            job.save()
        
        logger.info('Job {}.'.format(status.lower()))
        
    finally:
        
        # At one point this `finally` clause attempted to log a final
        # message that included counts of the critical, error, and
        # warning log messages that had been logged for this job.
        # This didn't work, however, due to a race condition. In
        # particular, there seemed to be no way for this thread to
        # ensure that all log records other than the one that it was
        # preparing had been processed by the logging thread before
        # this thread read the record counts. If all of the log
        # records had not been processed, the counts were inaccurate.
        #
        # In the future we may add record count fields to the Django
        # `Job` class so that accurate log record counts can be
        # reported in log displays. See record counts handler
        # TODO in `job_logging_manager` module for more detail.
        
        logging_manager.shut_down_logging()
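The docstring describes `job_info` as a `Bunch` carrying the command spec, the job ID, and the stop event, and the code also reads an `archive_lock` attribute from it. A sketch of how a parent process might assemble it, assuming a simple attribute-bag `Bunch` class (the module path and the construction are assumptions, not shown on this page):

import multiprocessing

from vesper.util.bunch import Bunch   # assumed module path

def make_job_info(command_spec, job_id, archive_lock):
    return Bunch(
        command_spec=command_spec,            # command to execute
        job_id=job_id,                        # Job model instance ID
        stop_event=multiprocessing.Event(),   # signals cancellation
        archive_lock=archive_lock)            # shared archive lock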
Example #12
def run_job(job_info):
    """
    Runs a job in a new process.
    
    This function is executed by the Vesper job manager each time it
    starts a new job. The function is executed in a new process, called
    the *main job process* of the job.
    
    The function sets up Django and configures the root logger for the
    main job process, constructs the command to be executed, and invokes
    the command's `execute` method. Logging is shut down after that method
    returns.
    
    Parameters:
    
        job_info : `Bunch`
            information pertaining to the new job.
            
            The information includes the command specification for the
            new job, the ID of the Django Job model instance for the job,
            and the stop event for the job.
            
            The information includes the ID of the Django job model
            instance rather than the instance itself so that the job
            info can be unpickled in the new process without first
            setting up Django.
            
            This object is *not* of type `vesper.command.job_info.JobInfo`,
            which contains somewhat different (though overlapping)
            information. This function invokes the `execute` method of the
            command of the new job with an argument of type
            `vesper.command.job_info.JobInfo`.
    """

    # Set up Django for the main job process. We must do this before we try
    # to use anything Django (e.g. the ORM) in the new process. We perform
    # the setup inside of this function rather than at the top of this module
    # so that it happens only in a new job process, and not in a parent
    # process that is importing this module merely to be able to execute
    # this function. In the latter case Django will already have been set up
    # in the importing process if it is needed, and it would be redundant
    # and potentially problematic to perform the setup again.
    django_utils.set_up_django()

    # These imports are here rather than at the top of this module so
    # they will be executed after Django is set up in the main job process.
    # from django.conf import settings as django_settings
    from vesper.django.app.models import Job
    import vesper.util.archive_lock as archive_lock

    # Set the archive lock for this process. The lock is provided to
    # this process by its creator.
    archive_lock.set_lock(job_info.archive_lock)

    # Get the Django model instance for this job.
    job = Job.objects.get(id=job_info.job_id)

    # Start up logging.
    # level = logging.DEBUG if django_settings.DEBUG else logging.INFO
    level = logging.INFO
    logging_manager = JobLoggingManager(job, level)
    logging_manager.start_up_logging()

    # Configure root logger for the main job process.
    logger = logging.getLogger()
    logging_config = logging_manager.logging_config
    JobLoggingManager.configure_logger(logger, logging_config)

    try:

        # Mark job as running.
        job.start_time = time_utils.get_utc_now()
        job.status = 'Running'
        with archive_lock.atomic():
            job.save()

        # Create command from command spec.
        command = _create_command(job_info.command_spec)

        prefix = 'Job started for command "{}"'.format(command.name)

        if len(command.arguments) == 0:
            logger.info('{} with no arguments.'.format(prefix))

        else:

            logger.info('{} with arguments:'.format(prefix))

            # Log command arguments JSON.
            args = json.dumps(command.arguments,
                              sort_keys=False,
                              indent=4,
                              cls=_CommandArgsEncoder)
            lines = args.splitlines()
            for line in lines:
                logger.info('    {}'.format(line))

        # Execute command.
        info = JobInfo(job_info.job_id, logging_config, job_info.stop_event)
        complete = command.execute(info)

    except Exception:

        # Update job status and log error message

        job.end_time = time_utils.get_utc_now()
        job.status = 'Failed'
        with archive_lock.atomic():
            job.save()

        logger.error('Job failed with an exception. See traceback below.\n' +
                     traceback.format_exc())

    else:

        # Update job status and log final message.

        status = 'Completed' if complete else 'Interrupted'

        job.end_time = time_utils.get_utc_now()
        job.status = status
        with archive_lock.atomic():
            job.save()

        logger.info('Job {}.'.format(status.lower()))

    finally:

        # At one point this `finally` clause attempted to log a final
        # message that included counts of the critical, error, and
        # warning log messages that had been logged for this job.
        # This didn't work, however, due to a race condition. In
        # particular, there seemed to be no way for this thread to
        # ensure that all log records other than the one that it was
        # preparing had been processed by the logging thread before
        # this thread read the record counts. If all of the log
        # records had not been processed, the counts were inaccurate.
        #
        # In the future we may add record count fields to the Django
        # `Job` class so that accurate log record counts can be
        # reported in log displays. See record counts handler
        # TODO in `job_logging_manager` module for more detail.

        logging_manager.shut_down_logging()
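`_CommandArgsEncoder`, passed to `json.dumps` as its `cls` argument above, is not shown on this page. A plausible sketch, assuming it exists mainly to serialize `datetime` values that can appear in command arguments (hypothetical):

import datetime
import json

class _CommandArgsEncoder(json.JSONEncoder):

    # Extend the default encoder to handle types that appear in
    # command arguments but are not natively JSON-serializable.
    def default(self, obj):
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return str(obj)
        return super().default(obj)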
Example #13
    def _create_clips(self, threshold):
        
        if not _CREATE_CLIPS:
            return
        
        # TODO: Find out exactly what database queries are
        # executed during detection (ideally, record the sequence
        # of queries) to see if database interaction could be
        # made more efficient, for example with a cache.
        
        recording_channel = self._recording_channel
        detector_model = self._detector_model
        start_offset = self._file_start_index + self._interval_start_index
        creation_time = time_utils.get_utc_now()
        
        create_clip_files = self._create_clip_files
        
        if self._defer_clip_creation:
            
            for start_index, length, annotations in self._clips:
                start_index += start_offset
                clip = [
                    recording_channel.id, start_index, length, creation_time,
                    self._job.id, detector_model.id, annotations]
                self._deferred_clips.append(clip)
                
        else:
            # database writes not deferred
                
            station = self._recording.station
            sample_rate = self._recording.sample_rate
            mic_output = recording_channel.mic_output
        
            if create_clip_files:
                clips = []
             
            # Create database records for current batch of clips in one
            # database transaction.
            
#             trans_start_time = time.time()
            
            try:
                
                with archive_lock.atomic(), transaction.atomic():
                    
                    for start_index, length, annotations in self._clips:
                        
                        try:
                        
                            # Get clip start time as a `datetime`.
                            start_index += start_offset
                            start_delta = datetime.timedelta(
                                seconds=start_index / sample_rate)
                            start_time = \
                                self._recording.start_time + start_delta
                             
                            end_time = signal_utils.get_end_time(
                                start_time, length, sample_rate)
                         
                            # It would be nice to use Django's
                            # `bulk_create` here, but unfortunately that
                            # won't automatically set clip IDs for us
                            # except (as of this writing) if we're using
                            # PostgreSQL.
                            clip = Clip.objects.create(
                                station=station,
                                mic_output=mic_output,
                                recording_channel=recording_channel,
                                start_index=start_index,
                                length=length,
                                sample_rate=sample_rate,
                                start_time=start_time,
                                end_time=end_time,
                                date=station.get_night(start_time),
                                creation_time=creation_time,
                                creating_user=None,
                                creating_job=self._job,
                                creating_processor=detector_model
                            )
                            
                            if create_clip_files:
                                
                                # Save clip so we can create clip file
                                # outside of transaction.
                                clips.append(clip)
                                
                            if annotations is not None:
                                
                                for name, value in annotations.items():
                                    
                                    annotation_info = \
                                        self._get_annotation_info(name)
                                    
                                    model_utils.annotate_clip(
                                        clip, annotation_info, str(value),
                                        creation_time=creation_time,
                                        creating_user=None,
                                        creating_job=self._job,
                                        creating_processor=detector_model)
                        
                        except Exception as e:
                            
                            duration = signal_utils.get_duration(
                                length, sample_rate)
                                
                            clip_string = Clip.get_string(
                                station.name, mic_output.name,
                                detector_model.name, start_time, duration)
                
                            raise _ClipCreationError(clip_string, e)

#                     trans_end_time = time.time()
#                     self._num_transactions += 1
#                     self._total_transactions_duration += \
#                         trans_end_time - trans_start_time
            
            except _ClipCreationError as e:
                
                batch_size = len(self._clips)
                self._num_database_failures += batch_size
                
                if batch_size == 1:
                    prefix = 'Clip'
                else:
                    prefix = 'All {} clips in this batch'.format(
                        batch_size)
                    
                self._logger.error((
                    '            Attempt to create clip {} failed with '
                    'message: {} {} will be ignored.').format(
                        clip_string, str(e.wrapped_exception), prefix))

            else:
                # clip creation succeeded
                
                if create_clip_files:
                
                    for clip in clips:
                        
                        try:
                            self._clip_manager.create_audio_file(clip)
                            
                        except Exception as e:
                            self._num_file_failures += 1
                            self._logger.error((
                                '            Attempt to create audio file '
                                'for clip {} failed with message: {} Clip '
                                'database record was still created.').format(
                                    str(clip), str(e)))
                            
        self._clips = []
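Both `_create_clips` variants note that Django's `bulk_create` would be preferable but does not set clip IDs except on PostgreSQL. A sketch of what that path might look like on a backend that supports it (hypothetical restructuring, trading the per-clip error handling above for a single all-or-nothing insert; newer Django and database versions support `RETURNING` more widely than when these comments were written):

def create_clips_bulk(clip_kwargs_list):
    # On a backend that supports RETURNING (e.g. PostgreSQL),
    # `bulk_create` sets each object's primary key, so clip IDs
    # are available afterward for creating audio files.
    clips = [Clip(**kwargs) for kwargs in clip_kwargs_list]
    return Clip.objects.bulk_create(clips)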
Example #14
    def _add_channel_clip_start_indices(self, channel, detector):

        # Stash some data as object attributes so we don't have to
        # repeatedly pass them to the `_find_clip_in_recording_channel`
        # method or query the database there.
        recording = channel.recording
        self._recording_start_time = recording.start_time
        self._recording_length = recording.length
        self._sample_rate = recording.sample_rate
        self._channel_num = channel.channel_num

        create_count_text = text_utils.create_count_text

        with archive_lock.atomic():

            with transaction.atomic():

                clips = Clip.objects.filter(
                    recording_channel=channel,
                    creating_processor=detector,
                    start_index=None)

                num_clips = clips.count()
                num_clips_found = 0

                if num_clips != 0:

                    count_text = create_count_text(num_clips, 'clip')

                    self._logger.info(
                        f'Processing {count_text} for recording channel '
                        f'"{str(channel)}" and detector "{detector.name}"...')

                    for clip in clips:

                        result = self._find_clip_in_recording_channel(clip)

                        if result is not None:

                            start_index = result[1]

                            start_seconds = start_index / self._sample_rate
                            delta = datetime.timedelta(seconds=start_seconds)
                            start_time = self._recording_start_time + delta

                            end_time = signal_utils.get_end_time(
                                start_time, clip.length, self._sample_rate)

                            start_time_change = \
                                (start_time - clip.start_time).total_seconds()

                            duration = (clip.length - 1) / self._sample_rate

                            self._logger.info(
                                f'    {start_index} {str(clip.start_time)} '
                                f'-> {str(start_time)} {start_time_change} '
                                f'{duration} {str(end_time)}')

                            clip.start_index = start_index
                            clip.start_time = start_time
                            clip.end_time = end_time

                            if not self._dry_run:
                                clip.save()

                            num_clips_found += 1

                    if num_clips_found != num_clips:
                        self._log_clips_not_found(num_clips - num_clips_found)

                return num_clips, num_clips_found
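Both versions of `_add_channel_clip_start_indices` end by calling `_log_clips_not_found`, which is not shown. A minimal sketch, assuming it simply reports the count (hypothetical):

    def _log_clips_not_found(self, num_clips):
        # Hypothetical helper: report clips whose start indices
        # could not be recovered from the recording.
        suffix = '' if num_clips == 1 else 's'
        self._logger.warning(
            f'    Could not find {num_clips} clip{suffix} in recording. '
            f'The clip{suffix} will be left unmodified.')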