def finish_job(self, job): """! @brief Retry on failure. """ if not job.complete(): raise eva.exceptions.RetryException( "GridPP post-processing of '%(input.file)s' to '%(output.file)s' failed." % job.gridpp_params )
def finish_job(self, job): """! @brief Retry on failures. """ if not job.complete(): raise eva.exceptions.RetryException( "FIMEX could not fill file '%s' with data from '%s'." % (job.output_filename, job.input_filename) )
def finish_job(self, job): """! @brief Retry deletion on failures, and report metrics to statsd. """ if not job.complete(): raise eva.exceptions.RetryException("%s: deleting files failed." % job.resource) self.statsd.incr('deleted_datainstances', len(job.instance_list))
def finish_job(self, job): if not job.complete(): raise eva.exceptions.RetryException( "Processing of %s to output directory %s failed." % (job.resource.url, job.output_directory) ) try: job.output_files = self.parse_file_recognition_output(job.stdout) except: raise eva.exceptions.RetryException( "Processing of %s did not produce any legible output; expecting a list of file names and NetCDF time variables in standard output." % job.resource.url )
def finish_job(self, job):
    # Running the job populated `job.status`. You should always check this variable.
    # Raising a RetryException ensures that processing is retried.
    if not job.complete():
        raise eva.exceptions.RetryException(
            "Processing of '%s' failed." % job.output_filename
        )

    # We might want to register our completed data instance with Productstatus.
    # It is, however, not required. We can skip this step for now.
    if not self.post_to_productstatus:
        return

    # Here ends your responsibility. The code for registering new products
    # with Productstatus is added by IT-GEO.
    self.require_productstatus_credentials()

def finish_job(self, job): if not job.complete(): raise eva.exceptions.RetryException("Download of '%s' failed." % job.resource.url) if self.env['EVA_OUTPUT_SERVICE_BACKEND']: self.service_backend = self.api.servicebackend[self.env['EVA_OUTPUT_SERVICE_BACKEND']] else: self.service_backend = None bytes_sec = self.parse_bytes_sec_from_lines(job.stderr) if bytes_sec is not None: if self.service_backend is not None: self.statsd.gauge('download_rate', bytes_sec, {'service_backend': self.service_backend.slug}) else: self.statsd.gauge('download_rate', bytes_sec) job.logger.info('Download speed is %d bytes/sec', bytes_sec)
def generate_and_post_resources(self, job):
    """!
    @brief Given a finished Job object, post information to Productstatus
    about newly created resources. Performs a number of sanity checks before
    posting any information.
    """
    if not self.post_to_productstatus():
        job.logger.warning('Skipping post to Productstatus because of missing configuration.')
        return
    if not job.complete():
        raise eva.exceptions.JobNotCompleteException('Refusing to post to Productstatus without a complete job.')
    try:
        job.logger.info('Generating Productstatus resources...')
        resources = self.default_resource_dictionary()
        self.generate_resources(job, resources)
    except ValueError:
        raise RuntimeError('generate_resources() did not return a tuple with three arrays; this is a bug in the code!')
    self.post_resources(job, resources)
    job.logger.info('Finished posting to Productstatus; all complete.')

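# Hypothetical sketch of a generate_resources() override in an adapter
# subclass, assuming `resources` is the dictionary returned by
# default_resource_dictionary() and holds the lists that post_resources()
# submits (e.g. 'productinstance', 'data', 'datainstance', matching the
# "three arrays" hinted at in the error message above). The attribute names
# `output_base_url` and `output_data_format` are illustrative assumptions,
# not the project's actual configuration keys.
def generate_resources(self, job, resources):
    """! @brief Hypothetical sketch: populate `resources` with objects to post. """
    datainstance = self.api.datainstance.create()
    datainstance.url = '%s/%s' % (self.output_base_url, job.output_filename)
    datainstance.format = self.output_data_format
    resources['datainstance'].append(datainstance)
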
def finish_job(self, job): if not job.complete(): raise eva.exceptions.RetryException("GRIB to NetCDF conversion of '%s' failed." % job.resource.url) job.logger.info('Successfully filled the NetCDF file %s with data from %s', job.data['filename'], job.resource.url)
def finish_job(self, job): """! @brief Ignore errors but log them. """ if not job.complete(): self.logger.error('THREDDS document could not be found; ignoring error condition.')
def finish_job(self, job):
    if not job.complete():
        raise eva.exceptions.RetryException('Job did not finish successfully.')
    job.logger.info('Job has finished.')

def finish_job(self, job): if not job.complete(): raise eva.exceptions.RetryException("Distribution of '%s' to '%s' failed." % (job.input_file, job.output_file)) job.logger.info("The file '%s' has been successfully distributed to '%s'", job.input_file, job.output_file)
def finish_job(self, job): if not job.complete(): job.logger.error("md5sum checking of '%s' failed, skipping further processing!", job.resource.url) self.statsd.incr('md5sum_fail') return job.logger.info('DataInstance %s has md5sum hash %s.', job.resource, job.resource.hash)