def operation(self):
    """
    Check every scheduled job for excessive runtime.

    For each running job whose execution time exceeds its monitor interval
    (and that has not already been reported within the last interval), log a
    human-readable warning and accumulate it; at the end a single exception
    email is sent containing all accumulated warnings.
    """
    current_time = time.time()
    error_msg = ""

    for job in self.jobs_list:
        execution_time = 0
        if job.running:
            execution_time = current_time - job.start_time

        time_from_last_failed_check = current_time - job.last_monitor_check_failed

        if (execution_time > job.monitor_interval and
                time_from_last_failed_check > job.monitor_interval):
            # Remember when this job was last reported so it is not
            # reported again before another full monitor interval elapses.
            job.last_monitor_check_failed = current_time

            if execution_time < 60:
                error = "Job %s is taking more than %d seconds to execute" % (job.name, execution_time)
            elif execution_time < 3600:
                minutes = int(execution_time / 60)
                error = "Job %s is taking more than %d minutes to execute" % (job.name, minutes)
            else:
                hours = int(execution_time / 3600)
                error = "Job %s is taking more than %d hours to execute" % (job.name, hours)

            error_msg += '\n' + error
            log.err(error)

    if error_msg != "":
        # Bugfix: previously only the last job's error string was mailed;
        # send the accumulated report covering every slow job instead.
        send_exception_email(error_msg)
def operation(self):
    """
    Check every scheduled job for excessive runtime.

    Jobs running longer than their monitor interval are logged and the
    warnings are accumulated in error_msg; one exception email with the
    full accumulated report is sent at the end.
    """
    current_time = time.time()
    error_msg = ""

    for job in self.jobs_list:
        execution_time = 0
        if job.running:
            execution_time = current_time - job.start_time

        time_from_last_failed_check = current_time - job.last_monitor_check_failed

        if (execution_time > job.monitor_interval and
                time_from_last_failed_check > job.monitor_interval):
            # Throttle: do not report the same job again until another
            # full monitor interval has elapsed.
            job.last_monitor_check_failed = current_time

            if execution_time < 60:
                error = "Job %s is taking more than %d seconds to execute" % (
                    job.name, execution_time)
            elif execution_time < 3600:
                minutes = int(execution_time / 60)
                error = "Job %s is taking more than %d minutes to execute" % (
                    job.name, minutes)
            else:
                hours = int(execution_time / 3600)
                error = "Job %s is taking more than %d hours to execute" % (
                    job.name, hours)

            error_msg += '\n' + error
            log.err(error)

    if error_msg != "":
        # Bugfix: the email previously carried only the last `error`,
        # discarding the accumulated `error_msg`.
        send_exception_email(error_msg)
def handler_time_analysis_end(self):
    """Report this handler if its total execution time exceeded the threshold."""
    elapsed = time.time() - self.start_time

    # Fast path: nothing to report while we are under the threshold.
    if elapsed <= self.handler_exec_time_threshold:
        return

    warning = "Handler [%s] exceeded exec threshold with an execution time of %.2f seconds" % (self.name, elapsed)
    log.err(warning)
    send_exception_email(warning)
def monitor_fun(self):
    """
    Periodic monitor tick: report how long the job has been running,
    scaled to a human-readable unit (seconds / minutes / hours).
    """
    self.monitor_runs += 1
    total = self.monitor_time * self.monitor_runs

    # Pick the largest applicable unit; the partitions match
    # total < 60, 60 <= total < 3600, total >= 3600.
    if total >= 3600:
        error = "Job %s is taking more than %d hours to execute" % (self.name, int(total / 3600))
    elif total >= 60:
        error = "Job %s is taking more than %d minutes to execute" % (self.name, int(total / 60))
    else:
        error = "Job %s is taking more than %d seconds to execute" % (self.name, total)

    log.err(error)
    send_exception_email(error)
def execution_check(self):
    """
    Post-request hook: record the request's execution time, report handlers
    that exceeded the configured threshold, and — when uniform answer time
    is enabled — sleep so every response takes a comparable amount of time
    (side-channel guard).
    """
    elapsed = datetime.now() - self.request.start_time
    self.request.execution_time = elapsed

    if elapsed.seconds > self.handler_exec_time_threshold:
        error = "Handler [%s] exceeded execution threshold (of %d secs) with an execution time of %.2f seconds" % \
                (self.name, self.handler_exec_time_threshold, elapsed.seconds)
        log.err(error)
        send_exception_email(error)
        track_handler(self)

    if self.uniform_answer_time:
        # Pad the response up to the guard duration (both terms in ms,
        # result converted to seconds for deferred_sleep).
        padding = (GLSettings.side_channels_guard - (elapsed.microseconds / 1000)) / 1000
        if padding > 0:
            yield deferred_sleep(padding)
def post(self):
    """
    Receive an exception report from the client, forward it to the
    exception email address (unless disabled), and answer 201 Created.
    """
    request = self.validate_message(self.request.body, requests.ExceptionDesc)

    if not GLSettings.disable_client_exception_notification:
        # Assemble the mail body from the validated client fields.
        parts = [
            "Exception generated by client: %s\n\n" % request['agent'],
            "URL: %s\n\n" % request['errorUrl'],
            "Error Message: %s\n\n" % request['errorMessage'],
            "Stacktrace:\n",
            json.dumps(request['stackTrace'], indent=2),
        ]
        send_exception_email("".join(parts))
        log.debug("Received client exception and notified to exception email")

    self.set_status(201)  # Created
def tooMuch(self):
    """
    Periodic callback fired while the monitored resource is still busy:
    log and mail a warning carrying a human-readable elapsed time.
    """
    self.run += 1
    self.elapsed_time = self.monitor_time * self.run

    if self.elapsed_time > 3600:
        hours = int(self.elapsed_time / 3600)
        error = "Warning: [%s] is taking more than %d hours to execute" % (self.resource_name, hours)
    elif self.elapsed_time > 60:
        # Bugfix: this was a separate `if`, so for runs longer than one
        # hour the minutes message overwrote the hours message.
        minutes = int(self.elapsed_time / 60)
        error = "Warning: [%s] is taking more than %d minutes to execute" % (self.resource_name, minutes)
    else:
        error = "Warning: [%s] is taking more than %d seconds to execute" % (self.resource_name, self.elapsed_time)

    log.err(error)
    send_exception_email(error)
def post(self):
    """
    Receive an exception report from the client and hand it to the
    exception mail handler (unless client notifications are disabled).
    """
    request = self.validate_message(self.request.content.read(), requests.ExceptionDesc)

    if not GLSettings.disable_client_exception_notification:
        # Assemble the mail body from the validated client fields.
        parts = [
            "URL: %s\n\n" % request['errorUrl'],
            "User Agent: %s\n\n" % request['agent'],
            "Error Message: %s\n\n" % request['errorMessage'],
            "Stacktrace:\n",
            json.dumps(request['stackTrace'], indent=2),
        ]
        send_exception_email("".join(parts))
        log.debug(
            "Received client exception and passed error to exception mail handler"
        )
def tooMuch(self):
    """
    Periodic callback fired while the job is still busy: log and mail a
    warning with a human-readable elapsed time, killing the job outright
    once it has been running for more than an hour.
    """
    self.run += 1
    self.elapsed_time = self.monitor_time * self.run

    if self.elapsed_time > 3600:
        hours = int(self.elapsed_time / 3600)
        error = "Warning: [%s] is taking more than %d hours to execute; killing it." % (self.job.name, hours)
        self.job.stop()
    elif self.elapsed_time > 60:
        # Bugfix: this was a separate `if`, so after the job was killed the
        # minutes message overwrote the hours/"killing it" message.
        minutes = int(self.elapsed_time / 60)
        error = "Warning: [%s] is taking more than %d minutes to execute" % (self.job.name, minutes)
    else:
        error = "Warning: [%s] is taking more than %d seconds to execute" % (self.job.name, self.elapsed_time)

    log.err(error)
    send_exception_email(error, mail_reason="Job Time Exceeded")
def post(self):
    """
    Receive an exception report from the client, forward it to the
    exception email address (unless disabled), and answer 201 Created.
    """
    request = self.validate_message(self.request.body, requests.ExceptionDesc)

    if not GLSettings.disable_client_exception_notification:
        # Assemble the mail body from the validated client fields.
        parts = [
            "URL: %s\n\n" % request['errorUrl'],
            "User Agent: %s\n\n" % request['agent'],
            "Error Message: %s\n\n" % request['errorMessage'],
            "Stacktrace:\n",
            json.dumps(request['stackTrace'], indent=2),
        ]
        send_exception_email("".join(parts))
        log.debug(
            "Received client exception and notified to exception email")

    self.set_status(201)  # Created
def handler_time_analysis_end(self):
    """
    If the software is running with the option -S --stats
    (GLSetting.log_timing_stats) then we are doing performance testing,
    having our mailbox spammed is not important, so we just skip to report
    the anomaly.
    """
    elapsed = time.time() - self.start_time

    # Report handlers that ran longer than the configured threshold.
    if elapsed > self.handler_exec_time_threshold:
        report = ("Handler [%s] exceeded execution threshold (of %d secs) with an execution time of %.2f seconds"
                  % (self.name, self.handler_exec_time_threshold, elapsed))
        log.err(report)
        send_exception_email(report)

    # In stats mode also record the measured timing for this request.
    if GLSettings.log_timing_stats:
        TimingStatsHandler.log_measured_timing(self.request.method,
                                               self.request.uri,
                                               self.start_time,
                                               elapsed)
def handler_time_analysis_end(self):
    """
    If the software is running with the option -S --stats
    (GLSetting.log_timing_stats) then we are doing performance testing,
    having our mailbox spammed is not important, so we just skip to report
    the anomaly.
    """
    elapsed = time.time() - self.start_time

    # Report handlers that ran longer than the configured threshold.
    if elapsed > self.handler_exec_time_threshold:
        report = ("Handler [%s] exceeded exec threshold (of %d secs) with an execution time of %.2f seconds"
                  % (self.name, self.handler_exec_time_threshold, elapsed))
        log.err(report)
        send_exception_email(report, mail_reason="Handler Time Exceeded")

    # In stats mode also record the measured timing for this request.
    if GLSettings.log_timing_stats:
        TimingStatsHandler.log_measured_timing(self.request.method,
                                               self.request.uri,
                                               self.start_time,
                                               elapsed)
def tooMuch(self):
    """
    Periodic callback fired while the monitored resource is still busy:
    log and mail a warning carrying a human-readable elapsed time.
    """
    self.run += 1
    self.elapsed_time = self.monitor_time * self.run

    if self.elapsed_time > 3600:
        hours = int(self.elapsed_time / 3600)
        error = "Warning: [%s] is taking more than %d hours to execute" % (
            self.resource_name, hours)
    elif self.elapsed_time > 60:
        # Bugfix: this was a separate `if`, so for runs longer than one
        # hour the minutes message overwrote the hours message.
        minutes = int(self.elapsed_time / 60)
        error = "Warning: [%s] is taking more than %d minutes to execute" % (
            self.resource_name, minutes)
    else:
        error = "Warning: [%s] is taking more than %d seconds to execute" % (
            self.resource_name, self.elapsed_time)

    log.err(error)
    send_exception_email(error)
def tooMuch(self):
    """
    Periodic callback fired while the job is still busy: log and mail a
    warning with a human-readable elapsed time, killing the job outright
    once it has been running for more than an hour.
    """
    self.run += 1
    self.elapsed_time = self.monitor_time * self.run

    if self.elapsed_time > 3600:
        hours = int(self.elapsed_time / 3600)
        error = "Warning: [%s] is taking more than %d hours to execute; killing it." % (
            self.job.name, hours)
        self.job.stop()
    elif self.elapsed_time > 60:
        # Bugfix: this was a separate `if`, so after the job was killed the
        # minutes message overwrote the hours/"killing it" message.
        minutes = int(self.elapsed_time / 60)
        error = "Warning: [%s] is taking more than %d minutes to execute" % (
            self.job.name, minutes)
    else:
        error = "Warning: [%s] is taking more than %d seconds to execute" % (
            self.job.name, self.elapsed_time)

    log.err(error)
    send_exception_email(error, mail_reason="Job Time Exceeded")
def receiverfile_planning(store):
    """
    Iterate over the newly uploaded InternalFiles and, for every
    (file, receiver) combination, create the ReceiverFile records that
    still need to be materialized.

    :param store: the Storm store used for all lookups and inserts.
    :return: a dict keyed by InternalFile id; each value carries the file
             path/size plus the list of ReceiverFile descriptors created
             for that file ('rfiles').
    """
    receiverfiles_maps = {}

    # Only files still flagged as new are considered.
    for ifile in store.find(InternalFile, InternalFile.new == True):
        # Give up on files that already failed too many times: mark them
        # as handled and report the failure by mail.
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            error = "Failed to handle receiverfiles creation for ifile %s (%d retries)" % \
                    (ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            log.err(error)
            send_exception_email(error)
            continue
        elif ifile.processing_attempts >= 1:
            log.err("Failed to handle receiverfiles creation for ifile %s (retry %d/%d)" %
                    (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        if ifile.processing_attempts:
            log.debug("Starting handling receiverfiles creation for ifile %s retry %d/%d" %
                      (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        # Count this attempt before doing any work, so a crash below still
        # advances the retry counter.
        ifile.processing_attempts += 1

        # One ReceiverFile per receiver of the internaltip.
        for receiver in ifile.internaltip.receivers:
            rtrf = store.find(ReceiverTip,
                              ReceiverTip.internaltip_id == ifile.internaltip_id,
                              ReceiverTip.receiver_id == receiver.id).one()

            receiverfile = ReceiverFile()
            receiverfile.receiver_id = receiver.id
            receiverfile.internaltip_id = ifile.internaltip_id
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtrf.id
            receiverfile.file_path = ifile.file_path
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            # https://github.com/globaleaks/GlobaLeaks/issues/444
            # avoid to mark the receiverfile as new if it is part of a submission
            # this way we avoid to send unuseful messages
            receiverfile.new = False if ifile.submission else True

            store.add(receiverfile)

            # Lazily create the per-file entry in the result map.
            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                    'plaintext_file_needed': False,
                    'ifile_id': ifile.id,
                    'ifile_path': ifile.file_path,
                    'ifile_size': ifile.size,
                    'rfiles': []
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'path': ifile.file_path,
                'size': ifile.size,
                'receiver': admin_serialize_receiver(
                    receiver, GLSettings.memory_copy.default_language)
            })

    return receiverfiles_maps
def on_error(self, exc):
    """Log and mail a report when the job dies with an error."""
    report = ("Job %s died with runtime %.4f [low: %.4f, high: %.4f]"
              % (self.name, self.mean_time, self.low_time, self.high_time))
    log.err(report)
    send_exception_email(report)
def _errback(self, loopingCall):
    """Errback for the looping call: log and mail a report when the job dies."""
    report = ("Job %s died with runtime %.4f [low: %.4f, high: %.4f]"
              % (self.name, self.mean_time, self.low_time, self.high_time))
    log.err(report)
    send_exception_email(report)
def receiverfile_planning(store):
    """
    Iterate over (a bounded batch of) newly uploaded InternalFiles and, for
    every (file, receiver) combination, create the ReceiverFile records that
    still need to be materialized.

    :param store: the Storm store used for all lookups and inserts.
    :return: a dict keyed by InternalFile id; each value carries the file
             path/size plus the list of ReceiverFile descriptors created
             for that file ('rfiles').
    """
    receiverfiles_maps = {}

    # Bound the amount of work per run to jobs_operation_limit files.
    ifilescnt = store.find(InternalFile, InternalFile.new == True).count()
    ifiles = store.find(
        InternalFile, InternalFile.new == True)[:GLSettings.jobs_operation_limit]

    if ifilescnt > GLSettings.jobs_operation_limit:
        log.debug(
            "Delivery iterating over %d InternalFile from a Queue of %d" %
            (GLSettings.jobs_operation_limit, ifilescnt))
    elif ifilescnt:
        log.debug("Delivery iterating over %d InternalFile" % ifilescnt)
    else:
        pass  # 0 files to be processed

    for ifile in ifiles:
        # Give up on files that already failed too many times: mark them
        # as handled and report the failure by mail.
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            # Bugfix: the message contained a typo ("ifilee").
            error = "Failed to handle receiverfiles creation for ifile %s (%d retries)" % \
                    (ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            log.err(error)
            send_exception_email(error)
            continue
        elif ifile.processing_attempts >= 1:
            log.err(
                "Failed to handle receiverfiles creation for ifile %s (retry %d/%d)" %
                (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        if ifile.processing_attempts:
            log.debug(
                "Starting handling receiverfiles creation for ifile %s retry %d/%d" %
                (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        # Count this attempt before doing any work, so a crash below still
        # advances the retry counter.
        ifile.processing_attempts += 1

        # One ReceiverFile per receiver of the internaltip.
        for receiver in ifile.internaltip.receivers:
            rtrf = store.find(
                ReceiverTip,
                ReceiverTip.internaltip_id == ifile.internaltip_id,
                ReceiverTip.receiver_id == receiver.id).one()

            receiverfile = ReceiverFile()
            receiverfile.receiver_id = receiver.id
            receiverfile.internaltip_id = ifile.internaltip_id
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtrf.id
            receiverfile.file_path = ifile.file_path
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            store.add(receiverfile)

            # Lazily create the per-file entry in the result map.
            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                    'plaintext_file_needed': False,
                    'ifile_id': ifile.id,
                    'ifile_path': ifile.file_path,
                    'ifile_size': ifile.size,
                    'rfiles': []
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'path': ifile.file_path,
                'size': ifile.size,
                'receiver': admin_serialize_receiver(
                    receiver, GLSettings.memory_copy.default_language)
            })

    return receiverfiles_maps
def dead_fun(self, loopingCall):
    """Callback fired when the looping job dies: log and mail a report."""
    report = ("Job %s is died with runtime %.4f [iterations: %d, low: %.4f, high: %.4f]"
              % (self.name, self.mean_time, self.job_runs, self.low_time, self.high_time))
    log.err(report)
    send_exception_email(report)
def receiverfile_planning(store):
    """
    Iterate over (a bounded batch of) newly uploaded InternalFiles and, for
    every (file, receiver) combination, create the ReceiverFile records that
    still need to be materialized.

    :param store: the Storm store used for all lookups and inserts.
    :return: a dict keyed by InternalFile id; each value carries the file
             path/size plus the list of ReceiverFile descriptors created
             for that file ('rfiles').
    """
    receiverfiles_maps = {}

    # Bound the amount of work per run to jobs_operation_limit files.
    ifilescnt = store.find(InternalFile, InternalFile.new == True).count()
    ifiles = store.find(InternalFile, InternalFile.new == True)[:GLSettings.jobs_operation_limit]

    if ifilescnt > GLSettings.jobs_operation_limit:
        log.debug("Delivery iterating over %d InternalFile from a queue of %d" % (
            GLSettings.jobs_operation_limit, ifilescnt))
    elif ifilescnt:
        log.debug("Delivery iterating over %d InternalFile" % ifilescnt)
    else:
        pass  # 0 files to be processed

    for ifile in ifiles:
        # Give up on files that already failed too many times: mark them
        # as handled and report the failure by mail.
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            error = "Failed to handle receiverfiles creation for ifile %s (%d retries)" % \
                    (ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            log.err(error)
            send_exception_email(error)
            continue
        elif ifile.processing_attempts >= 1:
            log.err("Failed to handle receiverfiles creation for ifile %s (retry %d/%d)" %
                    (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        if ifile.processing_attempts:
            log.debug("Starting handling receiverfiles creation for ifile %s retry %d/%d" %
                      (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        # Count this attempt before doing any work, so a crash below still
        # advances the retry counter.
        ifile.processing_attempts = ifile.processing_attempts + 1

        # One ReceiverFile per receiver of the internaltip.
        for receiver in ifile.internaltip.receivers:
            rtrf = store.find(ReceiverTip,
                              ReceiverTip.internaltip_id == ifile.internaltip_id,
                              ReceiverTip.receiver_id == receiver.id).one()

            receiverfile = ReceiverFile()
            receiverfile.receiver_id = receiver.id
            receiverfile.internaltip_id = ifile.internaltip_id
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtrf.id
            receiverfile.file_path = ifile.file_path
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            store.add(receiverfile)

            # Lazily create the per-file entry in the result map.
            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                    'plaintext_file_needed': False,
                    'ifile_id': ifile.id,
                    'ifile_path': ifile.file_path,
                    'ifile_size': ifile.size,
                    'rfiles': []
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'path': ifile.file_path,
                'size': ifile.size,
                'receiver': admin_serialize_receiver(receiver, GLSettings.memory_copy.default_language)
            })

    return receiverfiles_maps
def receiverfile_planning(store):
    """
    Iterate over the newly uploaded InternalFiles and, for every
    (file, receiver) combination, create the ReceiverFile records that
    still need to be materialized.

    :param store: the Storm store used for all lookups and inserts.
    :return: a dict keyed by InternalFile id; each value carries the file
             path/size plus the list of ReceiverFile descriptors created
             for that file ('rfiles').
    """
    receiverfiles_maps = {}

    # Only files still flagged as new are considered.
    for ifile in store.find(InternalFile, InternalFile.new == True):
        # Give up on files that already failed too many times: mark them
        # as handled and report the failure by mail.
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            error = "Failed to handle receiverfiles creation for ifile %s (%d retries)" % \
                    (ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            log.err(error)
            send_exception_email(error)
            continue
        elif ifile.processing_attempts >= 1:
            log.err("Failed to handle receiverfiles creation for ifile %s (retry %d/%d)" %
                    (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        if ifile.processing_attempts:
            log.debug("Starting handling receiverfiles creation for ifile %s retry %d/%d" %
                      (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        # Count this attempt before doing any work, so a crash below still
        # advances the retry counter.
        ifile.processing_attempts += 1

        # One ReceiverFile per receiver of the internaltip.
        for receiver in ifile.internaltip.receivers:
            rtrf = store.find(ReceiverTip,
                              ReceiverTip.internaltip_id == ifile.internaltip_id,
                              ReceiverTip.receiver_id == receiver.id).one()

            receiverfile = ReceiverFile()
            receiverfile.receiver_id = receiver.id
            receiverfile.internaltip_id = ifile.internaltip_id
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtrf.id
            receiverfile.file_path = ifile.file_path
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            # https://github.com/globaleaks/GlobaLeaks/issues/444
            # avoid to mark the receiverfile as new if it is part of a submission
            # this way we avoid to send unuseful messages
            receiverfile.new = False if ifile.submission else True

            store.add(receiverfile)

            # Lazily create the per-file entry in the result map.
            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                    'plaintext_file_needed': False,
                    'ifile_id': ifile.id,
                    'ifile_path': ifile.file_path,
                    'ifile_size': ifile.size,
                    'rfiles': []
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'path': ifile.file_path,
                'size': ifile.size,
                'receiver': admin_serialize_receiver(receiver, GLSettings.memory_copy.default_language)
            })

    return receiverfiles_maps