def run(self):
    self.notification_center.post_notification('SIPEngineWillStart', sender=self)
    init_options = Engine.default_start_options.copy()
    init_options.update(self._options)
    try:
        self._ua = PJSIPUA(self._handle_event, **init_options)
    except Exception:
        log.exception('Exception occurred while starting the Engine')
        exc_type, exc_val, exc_tb = sys.exc_info()
        exc_tb = "".join(traceback.format_exception(exc_type, exc_val, exc_tb))
        self.notification_center.post_notification('SIPEngineGotException', sender=self, data=NotificationData(type=exc_type, value=exc_val, traceback=exc_tb))
        self.notification_center.post_notification('SIPEngineDidFail', sender=self)
        return
    else:
        self.notification_center.post_notification('SIPEngineDidStart', sender=self)
    failed = False
    while not self._thread_stopping:
        try:
            failed = self._ua.poll()
        except:
            log.exception('Exception occurred while running the Engine')
            exc_type, exc_val, exc_tb = sys.exc_info()
            self.notification_center.post_notification('SIPEngineGotException', sender=self, data=NotificationData(type=exc_type, value=exc_val, traceback="".join(traceback.format_exception(exc_type, exc_val, exc_tb))))
            failed = True
        if failed:
            self.notification_center.post_notification('SIPEngineDidFail', sender=self)
            break
    if not failed:
        self.notification_center.post_notification('SIPEngineWillEnd', sender=self)
    self._ua.dealloc()
    del self._ua
    self.notification_center.post_notification('SIPEngineDidEnd', sender=self)

def _EH_CallFunctionEvent(self, event):
    try:
        event.function(*event.args, **event.kw)
    except:
        log.exception('Exception occurred while calling function %s in the GUI thread' % event.function.__name__)

def run(self):
    """Run the event queue processing loop in its own thread"""
    while not self._exit.isSet():
        self._active.wait()
        event = self.queue.get()
        if event is StopProcessing:
            break
        elif event is ProcessEvents:
            if self._waiting:
                preserved = []
                try:
                    unhandled = self.handle(self._waiting)
                    if not isinstance(unhandled, (list, type(None))):
                        raise ValueError('%s handler must return a list of unhandled events or None' % self.__class__.__name__)
                    if unhandled is not None:
                        preserved = unhandled  # preserve the unhandled events that the handler returned
                except Exception:
                    log.exception('Unhandled exception during event handling')
                self._waiting = preserved
        elif event is DiscardEvents:
            self._waiting = []
        elif getattr(event, 'high_priority', False):
            try:
                self.handle([event])
            except Exception:
                log.exception('Unhandled exception during high priority event handling')
            finally:
                del event  # do not reference this event until the next event arrives, in order to allow it to be released
        else:
            self._waiting.append(event)

def start(self):
    interface = WebServerConfig.local_ip
    port = WebServerConfig.local_port
    cert_path = WebServerConfig.certificate.normalized if WebServerConfig.certificate else None
    cert_chain_path = WebServerConfig.certificate_chain.normalized if WebServerConfig.certificate_chain else None
    if cert_path is not None:
        if not os.path.isfile(cert_path):
            log.error('Certificate file %s could not be found' % cert_path)
            return
        try:
            ssl_ctx_factory = DefaultOpenSSLContextFactory(cert_path, cert_path)
        except Exception:
            log.exception('Creating TLS context')
            log.err()
            return
        if cert_chain_path is not None:
            if not os.path.isfile(cert_chain_path):
                log.error('Certificate chain file %s could not be found' % cert_chain_path)
                return
            ssl_ctx = ssl_ctx_factory.getContext()
            try:
                ssl_ctx.use_certificate_chain_file(cert_chain_path)
            except Exception:
                log.exception('Setting TLS certificate chain file')
                log.err()
                return
        self.listener = reactor.listenSSL(port, self.site, ssl_ctx_factory, backlog=511, interface=interface)
        scheme = 'https'
    else:
        self.listener = reactor.listenTCP(port, self.site, backlog=511, interface=interface)
        scheme = 'http'
    port = self.listener.getHost().port
    self.__dict__['url'] = '%s://%s:%d' % (scheme, WebServerConfig.hostname or interface.normalized, port)
    log.msg('Web server listening for requests on: %s' % self.url)

def add(self, app_class):
    try:
        app = app_class()
    except Exception as e:
        log.exception('Failed to initialize {app.__appname__!r} application: {exception!s}'.format(app=app_class, exception=e))
    else:
        self.application_map[app.__appname__] = app

def update_statistics(self, session, stats):
    session.logger.info('statistics: {}'.format(stats))
    if stats['start_time'] is not None:
        for accounting in self.accounting:
            try:
                accounting.do_accounting(stats)
            except Exception as e:
                log.exception('An unhandled error occurred while doing accounting: %s' % e)

def _CH_process_results(self, command):
    for file in (f for f in command.files if not f.closed):
        try:
            _bonjour.DNSServiceProcessResult(file.file)
        except:
            # Should we close the file? The documentation doesn't say anything about this. -Luci
            log.exception()
    for file in command.files:
        file.active = False
    self._files = [f for f in self._files if not f.closed]
    self._select_proc.kill(RestartSelect)

def lookup(self, key):
    network = self.networks.get("sip_proxy", None)
    if network is None:
        return None
    try:
        node = network.lookup_node(key)
    except LookupError:
        node = None
    except Exception:
        log.exception()
        node = None
    return node

def run(self):
    """Run the event queue processing loop in its own thread"""
    while not self._exit.isSet():
        self._active.wait()
        event = self.queue.get()
        if event is StopProcessing:
            break
        try:
            self.handle(event)
        except Exception:
            log.exception('Unhandled exception during event handling')
        finally:
            del event  # do not reference this event until the next event arrives, in order to allow it to be released

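# A self-contained sketch of how a run loop like the ones above is driven:
# a minimal stand-in, assuming only the semantics visible in the snippets
# (events handled in order, a StopProcessing sentinel compared by identity
# ends the thread). MiniEventQueue and its methods are hypothetical names,
# not the library's own API.
import queue
import threading

StopProcessing = object()  # sentinel checked with 'is' in the run loop


class MiniEventQueue(threading.Thread):
    def __init__(self, handler):
        super().__init__()
        self.queue = queue.Queue()
        self.handle = handler

    def run(self):
        while True:
            event = self.queue.get()
            if event is StopProcessing:
                break
            try:
                self.handle(event)
            except Exception:
                import logging
                logging.getLogger(__name__).exception('Unhandled exception during event handling')
            finally:
                del event  # drop the reference so the event can be released

    def put(self, event):
        self.queue.put(event)

    def stop(self):
        self.queue.put(StopProcessing)


events = MiniEventQueue(print)  # the handler just prints each event
events.start()
events.put('hello')
events.stop()
events.join()
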
def write_response(self, chunk, code, comment, wait=True):
    """Generate and write the response, lose the connection in case of error"""
    try:
        response = make_response(chunk, code, comment)
    except ChunkParseError as ex:
        log.error('Failed to generate a response: %s' % ex)
        self.loseConnection(wait=False)
        raise
    except Exception:
        log.exception('Failed to generate a response')
        self.loseConnection(wait=False)
        raise
    else:
        if response is not None:
            self.write_chunk(response, wait=wait)

def _worker(self):
    thread = current_thread()
    while True:
        task = self._queue.get()
        if task is self.StopWorker:
            break
        try:
            task.function(*task.args, **task.kw)
        except:
            log.exception('Exception occurred while calling %r in the %r thread' % (task.function, thread.name))
        finally:
            with self._lock:
                self.__dict__['jobs'] -= 1
            del task
    self._threads.remove(thread)

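# The worker above only assumes its tasks expose function/args/kw attributes.
# A hedged sketch of such a task object (a hypothetical stand-in, not the
# library's own type):
from collections import namedtuple

Task = namedtuple('Task', 'function args kw')

task = Task(function=print, args=('job done',), kw={})
task.function(*task.args, **task.kw)  # exactly the call the worker makes
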
def lineReceived(self, line):
    if line == 'pong':
        self._queued_keepalives -= 1
        return
    if self.command is None:
        try:
            command, seq = line.split()
        except ValueError:
            log.error('Could not decode command/sequence number pair from dispatcher: %s' % line)
            return
        if command in self.required_headers:
            self.command = command
            self.seq = seq
            self.headers = DecodingDict()
        else:
            log.error('Unknown command: %s' % command)
            self.reply('{} error'.format(seq))
    elif line == '':
        missing_headers = self.required_headers[self.command].difference(self.headers)
        if missing_headers:
            for header in missing_headers:
                log.error('Missing mandatory header %r from %r command' % (header, self.command))
            response = 'error'
        else:
            # noinspection PyBroadException
            try:
                response = self.factory.parent.got_command(self.factory.host, self.command, self.headers)
            except Exception:
                log.exception()
                response = 'error'
        self.reply('{} {}'.format(self.seq, response))
        self.command = None
    else:
        try:
            name, value = line.split(": ", 1)
        except ValueError:
            log.error('Unable to parse header: %s' % line)
        else:
            try:
                self.headers[name] = value
            except DecodingError as e:
                log.error('Could not decode header: %s' % e)

def stop(self):
    self.authorization_handler.stop()
    notification_center = NotificationCenter()
    notification_center.remove_observer(self, name='SIPSessionNewIncoming')
    notification_center.remove_observer(self, name='SIPIncomingSubscriptionGotSubscribe')
    notification_center.remove_observer(self, name='SIPIncomingReferralGotRefer')
    notification_center.remove_observer(self, name='SIPIncomingRequestGotRequest')
    for app in self.application_registry:
        try:
            app.stop()
        except Exception as e:
            log.exception('Failed to stop {app.__appname__!r} application: {exception!s}'.format(app=app, exception=e))

def _worker(self):
    thread = current_thread()
    while True:
        task = self._queue.get()
        if task is self.StopWorker:
            break
        # noinspection PyBroadException
        try:
            task.function(*task.args, **task.kw)
        except:
            log.exception('Unhandled exception while calling %r in the %r thread' % (task.function, thread.name))
        finally:
            with self._lock:
                self.__dict__['jobs'] -= 1
            del task
    self._threads.remove(thread)

def start(self):
    interface = WebServerConfig.local_ip
    port = WebServerConfig.local_port
    cert_path = WebServerConfig.certificate.normalized if WebServerConfig.certificate else None
    cert_chain_path = WebServerConfig.certificate_chain.normalized if WebServerConfig.certificate_chain else None
    if cert_path is not None:
        if not os.path.isfile(cert_path):
            log.error('Certificate file %s could not be found' % cert_path)
            return
        try:
            ssl_ctx_factory = DefaultOpenSSLContextFactory(cert_path, cert_path)
        except Exception:
            log.exception('Creating TLS context')
            return
        if cert_chain_path is not None:
            if not os.path.isfile(cert_chain_path):
                log.error('Certificate chain file %s could not be found' % cert_chain_path)
                return
            ssl_ctx = ssl_ctx_factory.getContext()
            try:
                ssl_ctx.use_certificate_chain_file(cert_chain_path)
            except Exception:
                log.exception('Setting TLS certificate chain file')
                return
        self.listener = reactor.listenSSL(port, self.site, ssl_ctx_factory, backlog=511, interface=interface)
        scheme = 'https'
    else:
        self.listener = reactor.listenTCP(port, self.site, backlog=511, interface=interface)
        scheme = 'http'
    port = self.listener.getHost().port
    self.__dict__['url'] = '%s://%s:%d' % (scheme, WebServerConfig.hostname or interface.normalized, port)
    log.info('Web server listening for requests on: %s' % self.url)

class Backend(object):
    """Configuration datatype, used to select a backend module from the configuration file."""

    def __new__(typ, value):
        value = value.lower()
        try:
            return __import__('xcap.backend.%s' % value, globals(), locals(), [''])
        except (ImportError, AssertionError) as e:
            log.critical('Cannot load %r backend module: %s' % (value, e))
            sys.exit(1)
        except Exception:
            log.exception()
            sys.exit(1)

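# The __import__ call above passes a non-empty fromlist so that the leaf
# module (xcap.backend.<value>) is returned instead of the top-level xcap
# package. A minimal sketch of the same lookup with importlib, shown for
# clarity (an equivalent, not the project's code):
import importlib


def load_backend(value):
    # importlib.import_module returns the leaf module directly
    return importlib.import_module('xcap.backend.%s' % value.lower())
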
def post_notification(self, name, sender=UnknownSender, data=NotificationData()):
    """
    Post a notification which will be delivered to all observers whose
    subscription matches the name and sender attributes of the notification.
    """
    notification = Notification(name, sender, data)
    notification.center = self
    queue = self.queue
    queue.append(notification)
    # noinspection PyTypeChecker
    if len(queue) > 1:  # This is true if we post a notification from inside a notification handler
        return
    empty_set = set()
    while queue:
        notification = queue[0]
        observers = (self.observers.get((Any, Any), empty_set) |
                     self.observers.get((Any, notification.sender), empty_set) |
                     self.observers.get((notification.name, Any), empty_set) |
                     self.observers.get((notification.name, notification.sender), empty_set))
        for observer in observers:
            try:
                observer.handle_notification(notification)
            except Exception:
                log.exception('Unhandled exception in notification observer %r while handling notification %r' % (observer, notification.name))
        queue.popleft()

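# A minimal sketch of the re-entrancy trick in post_notification above:
# a notification posted from inside a handler is only appended to the queue
# (the len(queue) > 1 check makes the nested call return early) and the
# outer call drains it afterwards, so delivery happens one notification at
# a time, in posting order, and one failing observer does not stop the
# others. All names here are hypothetical stand-ins.
from collections import deque


class MiniCenter(object):
    def __init__(self):
        self.queue = deque()
        self.observers = []

    def post(self, name):
        self.queue.append(name)
        if len(self.queue) > 1:
            return  # an outer post() call is already draining the queue
        while self.queue:
            current = self.queue[0]
            for observer in self.observers:
                try:
                    observer(current, self)
                except Exception:
                    import logging
                    logging.getLogger(__name__).exception('observer failed while handling %r' % current)
            self.queue.popleft()


center = MiniCenter()
center.observers.append(lambda name, c: c.post('second') if name == 'first' else print(name))
center.post('first')  # the nested post('second') is delivered after 'first' completes
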
sys.exit(1)

# process was successfully put in the background. Redirect logging to syslog
log.start_syslog(name)

# This log line will go to syslog
log.info('application started (running in the background)')

# Add a signal handler for SIGUSR1
process.signals.add_handler(signal.SIGUSR1, signal_handler)

# Add another signal handler for SIGUSR1. Multiple handlers can be added
# for a given signal by different components/modules/threads of the
# application. The only limitation is that the first handler must be added
# from the main thread.
process.signals.add_handler(signal.SIGUSR1, signal_handler2)

log.info("sending SIGUSR1 to self")
os.kill(os.getpid(), signal.SIGUSR1)

log.info("sleeping for 3 seconds")
time.sleep(3)

# Simulate some error
try:
    # noinspection PyUnresolvedReferences
    bar = foo
except NameError as e:
    # Log an error message and the backtrace
    log.exception("cannot access foo: %s" % e)

log.info("program done, exiting")

sys.exit(1)

# process was successfully put in the background. Redirect logging to syslog
log.start_syslog(name)

# This log line will go to syslog
log.msg('application started (running in the background)')

# Add a signal handler for SIGUSR1
process.signals.add_handler(signal.SIGUSR1, signal_handler)

# Add another signal handler for SIGUSR1. Multiple handlers can be added
# for a given signal by different components/modules/threads of the
# application. The only limitation is that the first handler must be added
# from the main thread.
process.signals.add_handler(signal.SIGUSR1, signal_handler2)

log.msg("sending SIGUSR1 to self")
os.kill(os.getpid(), signal.SIGUSR1)

log.msg("sleeping for 3 seconds")
time.sleep(3)

# Simulate some error
try:
    bar = foo
except NameError as e:
    log.error("cannot access foo: %s" % e)
    # Also log the backtrace
    log.exception()

log.msg("program done, exiting")

def exception(self, message=None, **context):
    if message is not None:
        message = self.prefix + message
    log.exception(message, **context)

def pack_fn(filepath, filepath_zip, paths_remap_relbase, all_deps, report, mode):
    """
    'paths_remap_relbase' is the project path,
    we want all paths to be relative to this
    so we don't get server path included.
    """
    import os
    from bam.blend import blendfile_pack
    assert os.path.exists(filepath) and not os.path.isdir(filepath)

    log.info("  Source path: %r" % filepath)
    log.info("  Zip path: %r" % filepath_zip)

    deps_remap = {}
    paths_remap = {}
    paths_uuid = {}
    binary_edits = {}

    if filepath.endswith(".blend"):
        # find the path relative to the project's root
        blendfile_src_dir_fakeroot = os.path.dirname(os.path.relpath(filepath, paths_remap_relbase))
        try:
            yield from blendfile_pack.pack(
                filepath.encode('utf-8'), filepath_zip.encode('utf-8'),
                mode=mode,
                paths_remap_relbase=paths_remap_relbase.encode('utf-8'),
                deps_remap=deps_remap, paths_remap=paths_remap, paths_uuid=paths_uuid,
                all_deps=all_deps, report=report,
                blendfile_src_dir_fakeroot=blendfile_src_dir_fakeroot.encode('utf-8'),
                readonly=True,
                binary_edits=binary_edits,
            )
        except:
            log.exception("Error packing the blend file")
            return
    else:
        # non blend-file
        from bam.utils.system import uuid_from_file
        paths_uuid[os.path.basename(filepath)] = uuid_from_file(filepath)
        del uuid_from_file

        import zipfile
        with zipfile.ZipFile(filepath_zip, 'w', zipfile.ZIP_DEFLATED) as zip_handle:
            zip_handle.write(filepath, arcname=os.path.basename(filepath))
        del zipfile

        # simple case
        paths_remap[os.path.basename(filepath)] = os.path.basename(filepath)

    if os.path.isfile(filepath):
        paths_remap["."] = os.path.relpath(os.path.dirname(filepath), paths_remap_relbase)
    else:
        # TODO(cam) directory support
        paths_remap["."] = os.path.relpath(filepath, paths_remap_relbase)

    # TODO, avoid reopening zipfile
    # append json info to zip
    import zipfile
    with zipfile.ZipFile(filepath_zip, 'a', zipfile.ZIP_DEFLATED) as zip_handle:
        import json

        def write_dict_as_json(f, dct):
            zip_handle.writestr(
                f,
                json.dumps(
                    dct,
                    check_circular=False,
                    # optional (pretty)
                    sort_keys=True, indent=4, separators=(',', ': '),
                ).encode('utf-8'))

        write_dict_as_json(".bam_deps_remap.json", deps_remap)
        write_dict_as_json(".bam_paths_remap.json", paths_remap)
        write_dict_as_json(".bam_paths_uuid.json", paths_uuid)

        import pickle
        zip_handle.writestr(".bam_paths_edit.data", pickle.dumps(binary_edits, pickle.HIGHEST_PROTOCOL))
        del write_dict_as_json
        del binary_edits

def pack_fn(filepath, filepath_zip, paths_remap_relbase, all_deps, report, mode):
    """
    'paths_remap_relbase' is the project path,
    we want all paths to be relative to this
    so we don't get server path included.
    """
    import os
    from bam.blend import blendfile_pack
    assert os.path.exists(filepath) and not os.path.isdir(filepath)

    log.info("  Source path: %r" % filepath)
    log.info("  Zip path: %r" % filepath_zip)

    deps_remap = {}
    paths_remap = {}
    paths_uuid = {}
    binary_edits = {}

    if filepath.endswith(".blend"):
        # find the path relative to the project's root
        blendfile_src_dir_fakeroot = os.path.dirname(os.path.relpath(filepath, paths_remap_relbase))
        try:
            yield from blendfile_pack.pack(
                filepath.encode('utf-8'), filepath_zip.encode('utf-8'),
                mode=mode,
                paths_remap_relbase=paths_remap_relbase.encode('utf-8'),
                deps_remap=deps_remap, paths_remap=paths_remap, paths_uuid=paths_uuid,
                all_deps=all_deps, report=report,
                blendfile_src_dir_fakeroot=blendfile_src_dir_fakeroot.encode('utf-8'),
                use_variations=True,
                readonly=True,
                binary_edits=binary_edits,
            )
        except:
            log.exception("Error packing the blend file")
            return
    else:
        # non blend-file
        from bam.utils.system import uuid_from_file
        paths_uuid[os.path.basename(filepath)] = uuid_from_file(filepath)
        del uuid_from_file

        import zipfile
        with zipfile.ZipFile(filepath_zip, 'w', zipfile.ZIP_DEFLATED) as zip_handle:
            zip_handle.write(filepath, arcname=os.path.basename(filepath))
        del zipfile

        # simple case
        paths_remap[os.path.basename(filepath)] = os.path.basename(filepath)

    if os.path.isfile(filepath):
        paths_remap["."] = os.path.relpath(os.path.dirname(filepath), paths_remap_relbase)
    else:
        # TODO(cam) directory support
        paths_remap["."] = os.path.relpath(filepath, paths_remap_relbase)

    # TODO, avoid reopening zipfile
    # append json info to zip
    import zipfile
    with zipfile.ZipFile(filepath_zip, 'a', zipfile.ZIP_DEFLATED) as zip_handle:
        import json

        def write_dict_as_json(f, dct):
            zip_handle.writestr(
                f,
                json.dumps(
                    dct,
                    check_circular=False,
                    # optional (pretty)
                    sort_keys=True, indent=4, separators=(',', ': '),
                ).encode('utf-8'))

        write_dict_as_json(".bam_deps_remap.json", deps_remap)
        write_dict_as_json(".bam_paths_remap.json", paths_remap)
        write_dict_as_json(".bam_paths_uuid.json", paths_uuid)

        import pickle
        zip_handle.writestr(".bam_paths_edit.data", pickle.dumps(binary_edits, pickle.HIGHEST_PROTOCOL))
        del write_dict_as_json
        del binary_edits
