def _PE_debitbalance(self, line):
    valid_answers = ('Ok', 'Failed', 'Not prepaid')
    lines = line.splitlines()
    try:
        result = lines[0].strip().capitalize()
    except IndexError:
        raise ValueError("Empty reply from rating engine %s:%s" % (self.transport.getPeer().host, self.transport.getPeer().port))
    if result not in valid_answers:
        log.error("Invalid reply from rating engine: got '%s' from %s:%s" % (lines[0].strip(), self.transport.getPeer().host, self.transport.getPeer().port))
        log.warning('Rating engine possibly failed query: %s', self.__request)
        raise RatingEngineError('Invalid rating engine response')
    elif result == 'Failed':
        log.warning('Rating engine failed query: %s', self.__request)
        raise RatingEngineError('Rating engine failed query')
    else:
        try:
            timelimit = int(lines[1].split('=', 1)[1].strip())
            totalcost = lines[2].strip()
        except Exception:
            log.error("Invalid reply from rating engine for DebitBalance on lines 2, 3: got '%s' from %s:%s" % ("', '".join(lines[1:3]), self.transport.getPeer().host, self.transport.getPeer().port))
            timelimit = None
            totalcost = 0
        return timelimit, totalcost
def store_image(self, data):
    if data is None:
        return None
    data_hash = sha512(data).hexdigest()
    try:
        return self.filemap[data_hash].destination
    except KeyError:
        pass
    try:
        destination_name = os.path.join('images', self.available_names.popleft())
    except IndexError:
        # No more available file names.
        return None
    pixmap = QPixmap()
    if pixmap.loadFromData(data):
        pixmap = pixmap.scaled(32, 32, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        makedirs(ApplicationData.get('images'))
        if pixmap.save(ApplicationData.get(destination_name)):
            file_mapping = FileMapping(data_hash, destination_name)
            self.filemap[data_hash] = file_mapping
            map_filename = ApplicationData.get(os.path.join('images', '.cached_icons.map'))
            map_tempname = map_filename + '.tmp'
            try:
                with open(map_tempname, 'wb') as f:
                    pickle.dump(self.filemap, f)
                if sys.platform == 'win32':
                    unlink(map_filename)
                os.rename(map_tempname, map_filename)
            except Exception as e:
                log.error("could not save icon cache file mappings: %s" % e)
            return destination_name
def post_notification(self, name, sender=UnknownSender, data=NotificationData()):
    """
    Post a notification which will be delivered to all observers whose
    subscription matches the name and sender attributes of the notification.
    """
    notification = Notification(name, sender, data)
    notification.center = self
    queue = self.queue
    queue.append(notification)
    if len(queue) > 1:
        # This is true if we post a notification from inside a notification handler
        return
    empty_set = set()
    while queue:
        notification = queue[0]
        observers = (self.observers.get((Any, Any), empty_set) |
                     self.observers.get((Any, notification.sender), empty_set) |
                     self.observers.get((notification.name, Any), empty_set) |
                     self.observers.get((notification.name, notification.sender), empty_set))
        for observer in observers:
            try:
                observer.handle_notification(notification)
            except Exception:
                log.error("Exception occurred in observer %r while handling notification %r" % (observer, notification.name))
                log.err()
        queue.popleft()
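# A minimal usage sketch for the notification center above, assuming the class
# is named NotificationCenter and also exposes an add_observer(observer,
# name=Any, sender=Any) method that registers observers under the same
# (name, sender) keys post_notification() consults; that method is not shown
# here, so its name and signature are assumptions.
class LogObserver(object):
    def handle_notification(self, notification):
        # Invoked synchronously from post_notification's delivery loop.
        print('%s from %r: %r' % (notification.name, notification.sender, notification.data))

center = NotificationCenter()
center.add_observer(LogObserver(), name='SIPSessionDidStart')    # matches the (name, Any) key
center.post_notification('SIPSessionDidStart', sender=object())  # delivered to LogObserver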
def got_command(self, dispatcher, command, headers):
    if command == "summary":
        summary = {'ip': RelayConfig.relay_ip,
                   'version': __version__,
                   'status': self.status,
                   'uptime': int(time() - self.start_time),
                   'session_count': len(self.session_manager.sessions),
                   'stream_count': self.session_manager.stream_count,
                   'bps_relayed': self.session_manager.bps_relayed}
        return cjson.encode(summary)
    elif command == "sessions":
        return cjson.encode(self.session_manager.statistics)
    elif command == "update":
        if self.graceful_shutdown or self.shutting_down:
            if not self.session_manager.has_session(**headers):
                log.debug("cannot add new session: media-relay is shutting down")
                return 'halting'
        try:
            local_media = self.session_manager.update_session(dispatcher, **headers)
        except RelayPortsExhaustedError:
            log.error("Could not reserve relay ports for session, all allocated ports are being used")
            return "error"
        if local_media:
            return " ".join([RelayConfig.advertised_ip or local_media[0][0]] + [str(media[1]) for media in local_media])
    else:  # remove
        session = self.session_manager.remove_session(**headers)
        if session is None:
            return "error"
        else:
            return cjson.encode(session.statistics)
def start(self):
    interface = WebServerConfig.local_ip
    port = WebServerConfig.local_port
    cert_path = WebServerConfig.certificate.normalized if WebServerConfig.certificate else None
    cert_chain_path = WebServerConfig.certificate_chain.normalized if WebServerConfig.certificate_chain else None
    if cert_path is not None:
        if not os.path.isfile(cert_path):
            log.error('Certificate file %s could not be found' % cert_path)
            return
        try:
            ssl_ctx_factory = DefaultOpenSSLContextFactory(cert_path, cert_path)
        except Exception:
            log.exception('Creating TLS context')
            log.err()
            return
        if cert_chain_path is not None:
            if not os.path.isfile(cert_chain_path):
                log.error('Certificate chain file %s could not be found' % cert_chain_path)
                return
            ssl_ctx = ssl_ctx_factory.getContext()
            try:
                ssl_ctx.use_certificate_chain_file(cert_chain_path)
            except Exception:
                log.exception('Setting TLS certificate chain file')
                log.err()
                return
        self.listener = reactor.listenSSL(port, self.site, ssl_ctx_factory, backlog=511, interface=interface)
        scheme = 'https'
    else:
        self.listener = reactor.listenTCP(port, self.site, backlog=511, interface=interface)
        scheme = 'http'
    port = self.listener.getHost().port
    self.__dict__['url'] = '%s://%s:%d' % (scheme, WebServerConfig.hostname or interface.normalized, port)
    log.msg('Web server listening for requests on: %s' % self.url)
def send(self, request):
    self.deferred = request.deferred
    try:
        self.transport.write(request.command, OpenSIPSConfig.socket_path)
    except socket.error as why:
        log.error("cannot write request to `%s': %s" % (OpenSIPSConfig.socket_path, why.strerror))
        self.deferred.errback(Failure(CommandError("Cannot send request to OpenSIPS")))
def run(self):
    """Run the event queue processing loop in its own thread"""
    while not self._exit.isSet():
        self._active.wait()
        event = self.queue.get()
        if event is StopProcessing:
            break
        elif event is ProcessEvents:
            if self._waiting:
                preserved = []
                try:
                    unhandled = self.handle(self._waiting)
                    if not isinstance(unhandled, (list, type(None))):
                        raise ValueError("%s handler must return a list of unhandled events or None" % self.__class__.__name__)
                    if unhandled is not None:
                        preserved = unhandled  # preserve the unhandled events that the handler returned
                except Exception:
                    log.error("exception happened during event handling")
                    log.err()
                self._waiting = preserved
        elif event is DiscardEvents:
            self._waiting = []
        else:
            if getattr(event, 'high_priority', False):
                try:
                    self.handle([event])
                except Exception:
                    log.error("exception happened during high priority event handling")
                    log.err()
                finally:
                    del event  # do not reference this event until the next event arrives, in order to allow it to be released
            else:
                self._waiting.append(event)
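# A sketch of a handler for the batching queue loop above: on ProcessEvents the
# loop hands the entire waiting list to handle() and keeps whatever list it
# returns for the next pass (None means everything was consumed). EventQueue is
# an assumed name for the class run() above belongs to, and 'ready' is a
# made-up event attribute used only for illustration.
class BatchedQueue(EventQueue):
    def handle(self, events):
        for event in [e for e in events if getattr(e, 'ready', True)]:
            print('handling %r' % event)  # stand-in for real per-event work
        leftovers = [e for e in events if not getattr(e, 'ready', True)]
        return leftovers or None          # preserved by run() until the next ProcessEvents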
def notify(self, operation, entity_type, entity):
    node = self.lookup(entity)
    if node is not None:
        if node.control_port is None:
            log.error("Could not send notify because node %s has no control port" % node.ip)
            return
        self.control.send_request(Notification("notify %s %s %s" % (operation, entity_type, entity)), (node.ip, node.control_port))
def _CC_finish_init(self, value, req):
    try:
        call = self.factory.application.calls[req.callid]
    except KeyError:
        log.error("Call id %s disappeared before we could finish initializing it" % req.callid)
        req.deferred.callback('Error')
    else:
        if req.call_limit is not None and len(self.factory.application.users.get(call.billingParty, ())) >= req.call_limit:
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('Call limit reached')
        elif call.locked:
            ## prepaid account already locked by another call
            log.info("Call id %s of %s to %s forbidden because the account is locked" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('Locked')
        elif call.timelimit == 0:
            ## prepaid account with no credit
            log.info("Call id %s of %s to %s forbidden because credit is too low" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('No credit')
        elif req.call_limit is not None or call.timelimit is not None:
            ## call limited by credit value, a global time limit or number of calls
            log.info("User %s can make %s concurrent calls" % (call.billingParty, req.call_limit or "unlimited"))
            self.factory.application.users.setdefault(call.billingParty, []).append(call.callid)
            req.deferred.callback('Limited')
        else:
            ## no limit for call
            log.info("Call id %s of %s to %s is postpaid not limited" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('No limit')
def do_accounting(self, stats):
    attrs = {}
    attrs["Acct-Status-Type"] = "Update"
    attrs["User-Name"] = "mediaproxy@default"
    attrs["Acct-Session-Id"] = stats["call_id"]
    attrs["Acct-Session-Time"] = stats["duration"]
    attrs["Acct-Input-Octets"] = sum(stream_stats['caller_bytes'] for stream_stats in stats['streams'])
    attrs["Acct-Output-Octets"] = sum(stream_stats['callee_bytes'] for stream_stats in stats['streams'])
    attrs["Sip-From-Tag"] = stats["from_tag"]
    attrs["Sip-To-Tag"] = stats["to_tag"] or ""
    attrs["NAS-IP-Address"] = stats["streams"][0]["caller_local"].split(":")[0]
    attrs["Sip-User-Agents"] = (stats["caller_ua"] + "+" + stats["callee_ua"])[:253]
    attrs["Sip-Applications"] = ', '.join(sorted(set(stream['media_type'] for stream in stats['streams'] if stream['start_time'] != stream['end_time'])))[:253]
    attrs["Media-Codecs"] = ', '.join(stream['caller_codec'] for stream in stats['streams'])[:253]
    if stats["timed_out"] and not stats.get("all_streams_ice", False):
        attrs["Media-Info"] = "timeout"
    elif stats.get("all_streams_ice", False):
        attrs["Media-Info"] = "ICE session"
    else:
        attrs["Media-Info"] = ""
    for stream in stats["streams"]:
        if stream["post_dial_delay"] is not None:
            attrs["Acct-Delay-Time"] = int(stream["post_dial_delay"])
            break
    try:
        self.SendPacket(self.CreateAcctPacket(**attrs))
    except Exception as e:
        log.error("failed to send radius accounting record: %s" % e)
def _EH_CallFunctionEvent(self, event):
    try:
        event.function(*event.args, **event.kw)
    except Exception:
        log.error('Exception occurred while calling %r in the %r thread' % (event.function, current_thread().name))
        log.err()
def _RD_timedout(self, task):
    calls = dict([(call.callid, call) for call in task.args['calls'].values() if call.inprogress])
    if not calls:
        return {}
    ids = "(%s)" % ','.join(["'" + key + "'" for key in calls.keys()])
    query = '''SELECT %(session_id_field)s AS callid, %(duration_field)s AS duration,
                      %(from_tag_field)s AS fromtag, %(to_tag_field)s AS totag
               FROM %(table)s
               WHERE %(session_id_field)s IN %(ids)s AND %(media_info_field)s = 'timeout'
                     AND %(stop_info_field)s IS NULL''' % {
        'session_id_field': RadiusDatabaseConfig.sessionIdField,
        'duration_field': RadiusDatabaseConfig.durationField,
        'from_tag_field': RadiusDatabaseConfig.fromTagField,
        'to_tag_field': RadiusDatabaseConfig.toTagField,
        'media_info_field': RadiusDatabaseConfig.mediaInfoField,
        'stop_info_field': RadiusDatabaseConfig.stopInfoField,
        'table': RadiusDatabaseConfig.table.normalized,
        'ids': ids}
    try:
        rows = self.conn.queryAll(query)
    except Exception as e:
        log.error("Query failed: %s" % query)
        raise RadiusDatabaseError("Exception while querying for timedout calls %s." % e)
def load_applications(self):
    for name in find_builtin_applications():
        try:
            __import__('sylk.applications.{name}'.format(name=name))
        except ImportError as e:
            log.error('Failed to load builtin application {name!r}: {exception!s}'.format(name=name, exception=e))
    for name in find_extra_applications():
        if name in sys.modules:
            # being able to log this is contingent on this function only executing once
            log.warning('Not loading extra application {name!r} as it would overshadow a system package/module'.format(name=name))
            continue
        try:
            imp.load_module(name, *imp.find_module(name, [ServerConfig.extra_applications_dir.normalized]))
        except ImportError as e:
            log.error('Failed to load extra application {name!r}: {exception!s}'.format(name=name, exception=e))
def clientConnectionFailed(self, connector, reason):
    log.error('Could not connect to dispatcher at %(host)s:%(port)d (retrying in %%d seconds): %%s' % connector.__dict__ % (RelayConfig.reconnect_delay, reason.value))
    if self.parent.connector_needs_reconnect(connector):
        self.delayed = reactor.callLater(RelayConfig.reconnect_delay, connector.connect)
def _RD_terminated(self, task):
    calls = dict([(call.callid, call) for call in list(task.args['calls'].values()) if call.inprogress])
    if not calls:
        return {}
    ids = "(%s)" % ','.join(["'" + key + "'" for key in list(calls.keys())])
    query = """SELECT %(session_id_field)s AS callid, %(duration_field)s AS duration,
                      %(from_tag_field)s AS fromtag, %(to_tag_field)s AS totag
               FROM %(table)s
               WHERE %(session_id_field)s IN %(ids)s
                     AND (%(stop_info_field)s IS NOT NULL OR %(stop_time_field)s IS NOT NULL)""" % {
        'session_id_field': RadiusDatabaseConfig.sessionIdField,
        'duration_field': RadiusDatabaseConfig.durationField,
        'from_tag_field': RadiusDatabaseConfig.fromTagField,
        'to_tag_field': RadiusDatabaseConfig.toTagField,
        'stop_info_field': RadiusDatabaseConfig.stopInfoField,
        'stop_time_field': RadiusDatabaseConfig.stopTimeField,
        'table': RadiusDatabaseConfig.table.normalized,
        'ids': ids}
    try:
        rows = self.conn.queryAll(query)
    except Exception as e:
        log.error("Query failed: %s" % query)
        raise RadiusDatabaseError("Exception while querying for terminated calls %s." % e)

    def find(row, calls):
        try:
            call = calls[row[0]]
        except KeyError:
            return False
        return call.fromtag == row[2] and call.totag == row[3]

    return dict([(row[0], {'callid': row[0], 'duration': row[1], 'fromtag': row[2], 'totag': row[3]}) for row in rows if find(row, calls)])
def _CC_init(self, req):
    try:
        call = self.factory.application.calls[req.callid]
    except KeyError:
        call = Call(req, self.factory.application)
        if call.billingParty is None:
            req.deferred.callback('Error')
            return
        self.factory.application.calls[req.callid] = call
        # log.debug("Call id %s added to list of controlled calls" % (call.callid))  #DEBUG
    else:
        if call.token != req.call_token:
            log.error("Call id %s is duplicated" % call.callid)
            req.deferred.callback('Duplicated callid')
            return
        # The call was previously set up, which means it could be in the users table
        try:
            user_calls = self.factory.application.users[call.billingParty]
            user_calls.remove(call.callid)
            if len(user_calls) == 0:
                del self.factory.application.users[call.billingParty]
                self.factory.application.engines.remove_user(call.billingParty)
        except (ValueError, KeyError):
            pass
    deferred = call.setup(req)
    deferred.addCallbacks(callback=self._CC_finish_init, errback=self._CC_init_failed, callbackArgs=[req], errbackArgs=[req])
def _NH_SIPApplicationGotFatalError(self, notification):
    log.error('Fatal error:\n{}'.format(notification.data.traceback))
    QMessageBox.critical(self.main_window, "Fatal Error", "A fatal error occurred, {} will now exit.".format(self.applicationName()))
    sys.exit(1)
def start(self, searchtag):
    try:
        con = sqlite.connect(TWITTERBOT_DB)
        con.isolation_level = None
        db_max = con.execute("SELECT MAX(id) FROM twitts")
        max_id = db_max.fetchone()[0]
        twitts = self._api.GetSearch("#" + searchtag, include_entities=True, count=100, lang='en', since_id=max_id)
        for twitt in reversed(twitts):
            twitt_id = twitt.id
            twitt_author = twitt.user.screen_name.encode("utf8")
            twitt_content = twitt.text
            # Check if searchtag is included in the text
            for hashtag in twitt.hashtags:
                if hashtag.text.lower() != searchtag:
                    continue
                if self.user == twitt_author:
                    # I don't want to RT my own twitts!
                    continue
                if twitt_author in Config.blacklist.split(','):
                    # Don't retweet people in the blacklist
                    continue
                # Avoid duplicated twitts because of retwitting
                tmp = twitt_content
                if tmp.find('RT @') != -1:
                    tmp = tmp[tmp.find('RT @'):]
                    m = self._rt_regex.match(tmp) or self._via_regex.match(tmp)
                    if m:
                        data = m.groupdict()
                        tmp = data['tweet']
                if not tmp:
                    continue
                db_content = con.execute("SELECT id FROM twitts WHERE content MATCH ?", [tmp[:100]])
                if db_content.fetchall():
                    continue
                try:
                    message = "RT @%s: %s" % (twitt_author, twitt_content)
                    if len(message) > 140:
                        message = "%s..." % message[:137]
                    self._api.PostUpdate(message)
                except twitter.TwitterError as e:
                    log.error("Twitter Error: %s" % e.message)
                else:
                    con.execute("INSERT INTO twitts(id, content) VALUES(?, ?)", [twitt_id, message])
        con.close()
    except sqlite.Error as e:
        log.error("Database error: %s" % e)
def _NH_SIPApplicationGotFatalError(self, notification):
    log.error("Fatal error:\n{}".format(notification.data.traceback))
    QMessageBox.critical(self.main_window, u"Fatal Error", u"A fatal error occurred, {} will now exit.".format(self.applicationName()))
    sys.exit(1)
def _EH_CallFunctionEvent(self, event):
    try:
        event.function(*event.args, **event.kw)
    except Exception:
        log.error('Exception occurred while calling function %s in the GUI thread' % event.function.__name__)
        log.err()
def write_response(self, chunk, code, comment, wait=True):
    """Generate and write the response, lose the connection in case of error"""
    try:
        response = make_response(chunk, code, comment)
    except ChunkParseError as ex:
        log.error('Failed to generate a response: %s' % ex)
        self.loseConnection(wait=False)
        raise
def send(self, request):
    self.deferred = request.deferred
    try:
        self.transport.write(request.command, OpenSIPSConfig.socket_path)
    except socket.error as why:
        log.error("cannot write request to %s: %s" % (OpenSIPSConfig.socket_path, why.strerror))
        self.deferred.errback(Failure(CommandError("Cannot send request to OpenSIPS")))
def update_statistics(self, stats):
    log.debug("Got statistics: %s" % stats)
    if stats["start_time"] is not None:
        for accounting in self.accounting:
            try:
                accounting.do_accounting(stats)
            except Exception as e:
                log.error("An unhandled error occurred while doing accounting: %s" % e)
                log.err()
def _load_backend(self):
    backend_name = Configuration.hal_backend
    if backend_name:
        if '.' not in backend_name:
            backend_name = 'op2d.hal.backend.' + backend_name
        try:
            backend = import_module(backend_name)
            return backend.Backend()
        except Exception as e:
            log.error('Failed to load HAL backend: %s' % e)
def _send_keepalive(self):
    if self._queued_keepalives >= 3:
        # 3 keepalives in a row didn't get an answer. assume connection is down.
        log.error("missed 3 keepalive answers in a row. assuming the connection is down.")
        # do not use loseConnection() as it waits to flush the output buffers.
        reactor.callLater(0, self.transport.connectionLost, failure.Failure(TCPTimedOutError()))
        return None
    self.transport.write("ping\r\n")
    self._queued_keepalives += 1
    return KeepRunning
def _parse_ns_bindings(self, query):
    ns_bindings = {}
    ns_matches = self.XMLNS_REGEXP.findall(query)
    for m in ns_matches:
        try:
            prefix, ns = m.split('=')
            ns_bindings[prefix] = ns
        except ValueError:
            log.error("Ignoring invalid XPointer XMLNS expression: %r" % m)
            continue
    return ns_bindings
def __init__(self, dburi):
    if ':memory:' in dburi:
        log.warn('SQLite in-memory DB is not supported')
        dburi = None
    self._uri = dburi
    if self._uri is not None:
        try:
            self.conn = connectionForURI(self._uri)
            sqlhub.processConnection = self.conn
        except Exception as e:
            log.error('Error connecting to the DB: %s' % e)
            self.conn = Null
def clientConnectionLost(self, connector, reason):
    self.cancel_delayed()
    if reason.type != ConnectionDone:
        log.error("Connection with dispatcher at %(host)s:%(port)d was lost: %%s" % connector.__dict__ % reason.value)
    else:
        log.msg("Connection with dispatcher at %(host)s:%(port)d was closed" % connector.__dict__)
    if self.parent.connector_needs_reconnect(connector):
        if isinstance(reason.value, CertificateError) or self.connection_lost:
            self.delayed = reactor.callLater(RelayConfig.reconnect_delay, connector.connect)
        else:
            self.delayed = reactor.callLater(min(RelayConfig.reconnect_delay, 1), connector.connect)
        self.connection_lost = True
def __new__(cls, did):
    if did is None:
        return None
    try:
        h_entry, h_id = did.split(':')
    except ValueError:
        log.error("invalid dialog_id value: `%s'" % did)
        return None
    instance = str.__new__(cls, did)
    instance.h_entry = h_entry
    instance.h_id = h_id
    return instance
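# Usage sketch for the dialog id wrapper above (the class name DialogID is an
# assumption; the value is the "h_entry:h_id" pair used by OpenSIPS dialogs).
did = DialogID('3429:1128')
if did is not None:
    print(did.h_entry, did.h_id)     # '3429' '1128'
assert DialogID(None) is None        # missing ids pass through as None
assert DialogID('garbage') is None   # malformed ids log an error and yield None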
def _send_keepalive(self):
    if self._queued_keepalives >= 3:
        log.error('missed 3 keepalive answers in a row. assuming the connection is down.')
        # do not use loseConnection() as it waits to flush the output buffers.
        reactor.callLater(0, self.transport.connectionLost, failure.Failure(TCPTimedOutError()))
        return None
    self.transport.write('ping' + self.delimiter)
    self._queued_keepalives += 1
    return KeepRunning
def _create_connections_as_needed(self):
    while self.workers < self.max and len(self.waiters) > len(self.connections):
        socket_name = "opensips_%s%02d.sock" % (self.id, self.workers + 1)
        socket_path = process.runtime_file(socket_name)
        unlink(socket_path)
        try:
            conn = UNIXSocketConnection(socket_path)
        except CannotListenError as why:
            log.error("cannot create an OpenSIPS UNIX socket connection: %s" % str(why))
            break
        self.connections.append(conn)
        self.workers += 1
def load_backend(backend_name):
    if backend_name:
        mods = [backend_name]
        if '.' not in backend_name:
            mods.insert(0, 'op2d.hal.backend.' + backend_name)
        excs = []
        for mod in mods:
            try:
                backend = import_module(mod)
                return backend.Backend()
            except Exception as e:
                excs.append(e)
        log.error('Failed to load HAL backend: %s' % ' | '.join(str(e) for e in excs))
def check_incoming_SEND_chunk(self, chunk):
    """Check the 'To-Path' and 'From-Path' of the incoming SEND chunk.
    Return None if the paths are valid for this connection.
    If an error is detected, an MSRPError is created and returned.
    """
    assert chunk.method == 'SEND', repr(chunk)
    if chunk.to_path is None:
        return MSRPBadRequest('To-Path header missing')
    if chunk.from_path is None:
        return MSRPBadRequest('From-Path header missing')
    to_path = list(chunk.to_path)
    from_path = list(chunk.from_path)
    expected_to = [self.local_uri]
    expected_from = self.local_path + self.remote_path + [self.remote_uri]
    # Match only the session ID when use_sessmatch is set (http://tools.ietf.org/html/draft-ietf-simple-msrp-sessmatch-10)
    if self.use_sessmatch:
        if to_path[0].session_id != expected_to[0].session_id:
            log.error('To-Path: expected session_id %s, got %s' % (expected_to[0].session_id, to_path[0].session_id))
            return MSRPNoSuchSessionError('Invalid To-Path')
        if from_path[0].session_id != expected_from[0].session_id:
            log.error('From-Path: expected session_id %s, got %s' % (expected_from[0].session_id, from_path[0].session_id))
            return MSRPNoSuchSessionError('Invalid From-Path')
    else:
        if to_path != expected_to:
            log.error('To-Path: expected %r, got %r' % (expected_to, to_path))
            return MSRPNoSuchSessionError('Invalid To-Path')
        if from_path != expected_from:
            log.error('From-Path: expected %r, got %r' % (expected_from, from_path))
            return MSRPNoSuchSessionError('Invalid From-Path')
def check_incoming_SEND_chunk(self, chunk):
    """Check the 'To-Path' and 'From-Path' of the incoming SEND chunk.
    Return None if the paths are valid for this connection.
    If an error is detected, an MSRPError is created and returned.
    """
    assert chunk.method == 'SEND', repr(chunk)
    try:
        ToPath = chunk.headers['To-Path']
    except KeyError:
        return MSRPBadRequest('To-Path header missing')
    try:
        FromPath = chunk.headers['From-Path']
    except KeyError:
        return MSRPBadRequest('From-Path header missing')
    ToPath = list(ToPath.decoded)
    FromPath = list(FromPath.decoded)
    ExpectedTo = [self.local_uri]
    ExpectedFrom = self.local_path + self.remote_path + [self.remote_uri]
    # Match only the session ID when use_sessmatch is set (http://tools.ietf.org/html/draft-ietf-simple-msrp-sessmatch-10)
    if self.use_sessmatch:
        if ToPath[0].session_id != ExpectedTo[0].session_id:
            log.error('To-Path: expected session_id %s, got %s' % (ExpectedTo[0].session_id, ToPath[0].session_id))
            return MSRPNoSuchSessionError('Invalid To-Path')
        if FromPath[0].session_id != ExpectedFrom[0].session_id:
            log.error('From-Path: expected session_id %s, got %s' % (ExpectedFrom[0].session_id, FromPath[0].session_id))
            return MSRPNoSuchSessionError('Invalid From-Path')
    else:
        if ToPath != ExpectedTo:
            log.error('To-Path: expected %r, got %r' % (ExpectedTo, ToPath))
            return MSRPNoSuchSessionError('Invalid To-Path')
        if FromPath != ExpectedFrom:
            log.error('From-Path: expected %r, got %r' % (ExpectedFrom, FromPath))
            return MSRPNoSuchSessionError('Invalid From-Path')
def run(self):
    """Run the event queue processing loop in its own thread"""
    while not self._exit.isSet():
        self._active.wait()
        event = self.queue.get()
        if event is StopProcessing:
            break
        try:
            self.handle(event)
        except Exception:
            log.error("exception happened during event handling")
            log.err()
        finally:
            del event  # do not reference this event until the next event arrives, in order to allow it to be released
def lineReceived(self, line):
    if line in ["quit", "exit"]:
        self.transport.loseConnection()
    elif line == "summary":
        defer = self.factory.dispatcher.relay_factory.get_summary()
        self._add_callbacks(defer)
    elif line == "sessions":
        defer = self.factory.dispatcher.relay_factory.get_statistics()
        self._add_callbacks(defer)
    elif line == "version":
        self.reply(__version__)
    else:
        log.error("Unknown command on management interface: %s" % line)
        self.reply("error")
def start(self, searchtag):
    try:
        con = sqlite.connect(TWITTERBOT_DB)
        con.isolation_level = None
        twitts = self.search_tag(searchtag)
        for twitt in reversed(twitts['entries']):
            try:
                twitt_id = twitt.id.split(':')[2]
            except IndexError:
                twitt_id = twitt.id  # fall back to the raw id
            twitt_author = twitt.author.split(' ')[0]
            twitt_content = twitt.title
            if self.user == twitt_author:
                # I don't want to RT my own twitts!
                continue
            if twitt_author in Config.blacklist:
                # Don't retweet people in the blacklist
                continue
            db_id = con.execute("SELECT id FROM twitts WHERE id MATCH ?", [twitt_id])
            if db_id.fetchall():
                # We already twitted this!
                continue
            # Avoid duplicated twitts because of retwitting
            tmp = twitt_content
            if tmp.find('RT @') != -1:
                tmp = tmp[tmp.find('RT @'):]
                m = self._rt_regex.match(tmp) or self._via_regex.match(tmp)
                if m:
                    data = m.groupdict()
                    tmp = data['tweet']
            if not tmp:
                continue
            db_content = con.execute("SELECT id FROM twitts WHERE content MATCH ?", [tmp[:100]])
            if db_content.fetchall():
                continue
            try:
                message = "RT @%s: %s" % (twitt_author, twitt_content)
                if len(message) > 140:
                    message = "%s..." % message[:137]
                self._api.PostUpdate(message)
            except twitter.TwitterError as e:
                log.error("Twitter Error: %s" % e.message)
            else:
                con.execute("INSERT INTO twitts(id, content) VALUES(?, ?)", [twitt_id, message])
        con.close()
    except sqlite.Error as e:
        log.error("Database error: %s" % e)
def write_response(self, chunk, code, comment, wait=True):
    """Generate and write the response, lose the connection in case of error"""
    try:
        response = make_response(chunk, code, comment)
    except ChunkParseError as ex:
        log.error('Failed to generate a response: %s' % ex)
        self.loseConnection(wait=False)
        raise
    except Exception:
        log.exception('Failed to generate a response')
        self.loseConnection(wait=False)
        raise
    else:
        if response is not None:
            self.write_chunk(response, wait=wait)
def connectionLost(self, reason):
    if reason.type == ConnectionDone:
        log.msg("Connection with relay at %s was closed" % self.ip)
    elif reason.type == ConnectionReplaced:
        log.warn("Old connection with relay at %s was lost" % self.ip)
    else:
        log.error("Connection with relay at %s was lost: %s" % (self.ip, reason.value))
    for command, defer, timer in self.commands.itervalues():
        timer.cancel()
        defer.errback(RelayError("Relay at %s disconnected" % self.ip))
    if self.timedout is True:
        self.timedout = False
        if self.disconnect_timer.active():
            self.disconnect_timer.cancel()
        self.disconnect_timer = None
    self.factory.connection_lost(self)
def lineReceived(self, line):
    if line == 'pong':
        self._queued_keepalives -= 1
        return
    if self.command is None:
        try:
            command, seq = line.split()
        except ValueError:
            log.error('Could not decode command/sequence number pair from dispatcher: %s' % line)
            return
        if command in self.required_headers:
            self.command = command
            self.seq = seq
            self.headers = DecodingDict()
        else:
            log.error('Unknown command: %s' % command)
            self.reply('{} error'.format(seq))
    elif line == '':
        missing_headers = self.required_headers[self.command].difference(self.headers)
        if missing_headers:
            for header in missing_headers:
                log.error('Missing mandatory header %r from %r command' % (header, self.command))
            response = 'error'
        else:
            # noinspection PyBroadException
            try:
                response = self.factory.parent.got_command(self.factory.host, self.command, self.headers)
            except Exception:
                log.exception()
                response = 'error'
        self.reply('{} {}'.format(self.seq, response))
        self.command = None
    else:
        try:
            name, value = line.split(": ", 1)
        except ValueError:
            log.error('Unable to parse header: %s' % line)
        else:
            try:
                self.headers[name] = value
            except DecodingError as e:
                log.error('Could not decode header: %s' % e)
def do_accounting(self, stats):
    sqlrepr = connection.sqlrepr
    names = ', '.join([DatabaseConfig.callid_column, DatabaseConfig.fromtag_column, DatabaseConfig.totag_column, DatabaseConfig.info_column])
    values = ', '.join(sqlrepr(v) for v in [stats["call_id"], stats["from_tag"], stats["to_tag"], cjson.encode(stats)])
    q = """INSERT INTO %s (%s) VALUES (%s)""" % (DatabaseConfig.sessions_table, names, values)
    try:
        try:
            connection.query(q)
        except ProgrammingError as e:
            try:
                MediaSessions.createTable(ifNotExists=True)
            except OperationalError:
                raise e
            else:
                connection.query(q)
    except DatabaseError as e:
        log.error("failed to insert record into database: %s" % e)
def send(self, request):
    try:
        self.transport.write(json.dumps(request.__data__), OpenSIPSConfig.socket_path)
    except socket.error as e:
        log.error("cannot write request to %s: %s" % (OpenSIPSConfig.socket_path, e.strerror))
        request.deferred.errback(Failure(Error("Cannot send MI request %s to OpenSIPS" % request.method)))
    else:
        self.transport.requests[request.id] = request
        request.deferred.addBoth(request.process_response)
        reactor.callLater(self.timeout, self._did_timeout, request)
        log.debug('Send MI request: {}'.format(request.__data__))
    return request.deferred
def dial():
    to = request.args.get('to', None)
    if to is None:
        return error_response(400, 'destination not specified')
    account_id = request.args.get('from', None)
    account = None
    if account_id is not None:
        try:
            account = AccountManager().get_account(account_id)
        except KeyError:
            return error_response(400, 'invalid account specified')
    try:
        SessionManager().start_call(None, to, [AudioStream()], account=account)
    except Exception as e:
        log.error('Starting call to %s: %s' % (to, e))
        log.err()
        return error_response(400, str(e))
def get_application(self, ruri, headers):
    if SYLK_APP_HEADER in headers:
        application_name = headers[SYLK_APP_HEADER].body.strip()
    else:
        application_name = ServerConfig.default_application
        if self.application_map:
            prefixes = ("%s@%s" % (ruri.user, ruri.host), ruri.host, ruri.user)
            for prefix in prefixes:
                if prefix in self.application_map:
                    application_name = self.application_map[prefix]
                    break
    try:
        return self.application_registry[application_name]
    except KeyError:
        log.error('Application %s is not loaded' % application_name)
        raise ApplicationNotLoadedError
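# Illustrative routing table for the lookup above (the entries are made up):
# prefixes are tried most-specific first, so a user@host entry beats a host
# entry, which beats a bare user entry; anything unmatched falls back to
# ServerConfig.default_application.
application_map = {'echo@example.com': 'echo',   # exact address
                   'example.com': 'conference',  # whole domain
                   'playback': 'playback'}       # this user at any domain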
def get_application(self, ruri, headers):
    if SYLK_APP_HEADER in headers:
        application = headers[SYLK_APP_HEADER].body.strip()
    else:
        application = ServerConfig.default_application
        if self.application_map:
            prefixes = ("%s@%s" % (ruri.user, ruri.host), ruri.host, ruri.user)
            for prefix in prefixes:
                if prefix in self.application_map:
                    application = self.application_map[prefix]
                    break
    try:
        app = self.applications[application]
    except KeyError:
        log.error('Application %s is not loaded' % application)
        raise ApplicationNotLoadedError
    else:
        return app()
def format_log_message(request, response, reason):
    msg = ''
    info = ''
    try:
        msg = format_access_record(request, response)
        code = getattr(response, 'code', None)
        info += log_format_request_headers(code, request)
        info += log_format_request_body(code, request)
        info += log_format_response_headers(code, response)
        info += log_format_response_body(code, response)
        info += log_format_stacktrace(code, reason)
    except Exception:
        log.error('Formatting log message failed')
        log.err()
    if info[-1:] == '\n':
        info = info[:-1]
    if info:
        info = '\n' + info
    return msg + info
def lineReceived(self, line):
    if line == 'pong':
        self._queued_keepalives -= 1
        return
    if self.command is None:
        try:
            command, seq = line.split()
        except ValueError:
            log.error("Could not decode command/sequence number pair from dispatcher: %s" % line)
            return
        if command in self.required_headers:
            self.command = command
            self.seq = seq
            self.headers = DecodingDict()
        else:
            log.error("Unknown command: %s" % command)
            self.transport.write("%s error\r\n" % seq)
    elif line == "":
        try:
            missing_headers = self.required_headers[self.command].difference(self.headers)
            if missing_headers:
                for header in missing_headers:
                    log.error("Missing mandatory header '%s' from '%s' command" % (header, self.command))
                response = "error"
            else:
                try:
                    response = self.factory.parent.got_command(self.factory.host, self.command, self.headers)
                except Exception:
                    log.err()
                    response = "error"
        finally:
            self.transport.write("%s %s\r\n" % (self.seq, response))
            self.command = None
    else:
        try:
            name, value = line.split(": ", 1)
        except ValueError:
            log.error("Unable to parse header: %s" % line)
        else:
            try:
                self.headers[name] = value
            except DecodingError as e:
                log.error("Could not decode header: %s" % e)
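# Wire-format sketch for the dispatcher protocol parsed by the two
# lineReceived() variants above (the header names and values are illustrative,
# not taken from the code): a command plus sequence number, any number of
# "Name: value" headers, then an empty line that triggers dispatch; the reply
# echoes the sequence number.
#
#   >> update 12345
#   >> call_id: abcdef@10.0.0.1
#   >> from_tag: 76ff21
#   >>
#   << 12345 10.0.0.2 55012 55013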