def _save_calls(self, result):
    if self.calls:
        log.info('Saving calls')
        calls_file = '%s/%s' % (process.runtime_directory, backup_calls_file)
        try:
            f = open(calls_file, 'w')
        except (IOError, OSError), e:
            log.warn('Failed to open calls file %s: %s' % (calls_file, e))
        else:
            for call in self.calls.values():
                call.application = None
                ## we will mark timers with 'running' or 'idle', depending on their current state,
                ## to be able to correctly restore them later (Timer objects cannot be pickled)
                if call.timer is not None:
                    if call.inprogress:
                        call.timer.cancel()
                        call.timer = 'running'  ## temporary mark that this timer was running
                    else:
                        call.timer = 'idle'     ## temporary mark that this timer was not running
            failed_dump = False
            try:
                try:
                    cPickle.dump(self.calls, f)
                except Exception, why:
                    log.warn("Failed to dump call list: %s" % why)
                    failed_dump = True
            finally:
                f.close()
            if failed_dump:
                try:
                    os.unlink(calls_file)
                except OSError:
                    pass
            else:
                log.info("Saved calls: %s" % str(self.calls.keys()))
        self.calls = {}
def refresh_file_sizes():
    """Computes & stores the 'length_aggregate_in_bytes' fields of all files."""
    from application.modules import file_storage

    matched = 0
    unmatched = 0
    total_size = 0

    files_collection = app.data.driver.db['files']
    for file_doc in files_collection.find():
        file_storage.compute_aggregate_length(file_doc)
        length = file_doc['length_aggregate_in_bytes']
        total_size += length

        result = files_collection.update_one(
            {'_id': file_doc['_id']},
            {'$set': {'length_aggregate_in_bytes': length}})
        if result.matched_count != 1:
            log.warning('Unable to update document %s', file_doc['_id'])
            unmatched += 1
        else:
            matched += 1

    log.info('Updated %i file documents.', matched)
    if unmatched:
        log.warning('Unable to update %i documents.', unmatched)
    log.info('%i bytes (%.3f GiB) storage used in total.',
             total_size, total_size / 1024 ** 3)
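# Hedged usage sketch for refresh_file_sizes(): the function reads
# app.data.driver.db, so it is assumed to run inside the application context
# of the Eve/Flask app it belongs to. The import path and wrapper below are
# hypothetical, for illustration only.
def example_refresh_file_sizes():
    from application import app  # hypothetical import path
    with app.app_context():
        refresh_file_sizes()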
def handle_event(self, event):
    # print "Received event: %s" % event
    networks = self.networks
    # mapping between role names and lists of nodes with that role
    role_map = ThorEntitiesRoleMap(event.message)
    for role in ["msrprelay_server"]:
        try:
            # avoid setdefault here because it always evaluates the 2nd argument
            network = networks[role]
        except KeyError:
            from thor import network as thor_network
            network = thor_network.new(Config.multiply)
            networks[role] = network
        new_nodes = set(node.ip for node in role_map.get(role, []))
        old_nodes = set(network.nodes)
        added_nodes = new_nodes - old_nodes
        removed_nodes = old_nodes - new_nodes
        if removed_nodes:
            for node in removed_nodes:
                network.remove_node(node)
            plural = len(removed_nodes) != 1 and 's' or ''
            log.info('removed %s node%s: %s', role, plural, ', '.join(removed_nodes))
        if added_nodes:
            for node in added_nodes:
                network.add_node(node)
            plural = len(added_nodes) != 1 and 's' or ''
            log.info('added %s node%s: %s', role, plural, ', '.join(added_nodes))
def start(self, request):
    assert self.__initialized, "Trying to start an uninitialized call"
    if self.starttime is None:
        self.dialogid = request.dialogid
        self.starttime = time.time()
        if self.timer is not None:
            log.info("Call from %s to %s started for maximum %d seconds (%s)" %
                     (self.user, self.ruri, self.timelimit, self.callid))
            self.timer.start()
        # also reset all calls of this user to this call's time limit
        # (no reason to alter other calls if this call is not prepaid)
        if self.prepaid:
            rating = RatingEngineConnections.getConnection(self)
            rating.getCallLimit(self).addCallbacks(callback=self._start_finish_calllimit,
                                                   errback=self._start_error)
            for callid in self.application.users[self.billingParty]:
                if callid == self.callid:
                    continue
                call = self.application.calls[callid]
                if not call.prepaid:
                    continue  # only alter prepaid calls
                if call.inprogress:
                    call.timelimit = self.starttime - call.starttime + self.timelimit
                    if call.timer:
                        call.timer.reset(self.timelimit)
                    log.info("Call from %s to %s also set to %d seconds (%s)" %
                             (call.user, call.ruri, self.timelimit, callid))
                elif not call.complete:
                    call.timelimit = self.timelimit
                    call._setup_timer()
def _NH_SIPApplicationDidEnd(self, notification):
    log.info('SIP application ended')
    tracelog_manager = TraceLogManager()
    tracelog_manager.stop()
    if not self.stopping_event.is_set():
        log.warning('SIP application ended without shutting down all subsystems')
        self.stopping_event.set()
    self.stopped_event.set()
def connectionMade(self):
    peer = self.transport.getPeer()
    log.info("Connected to Rating Engine at %s:%d" % (peer.host, peer.port))
    self.connected = True
    self.factory.application.connectionMade(self.transport.connector)
    if self._request_queue:
        self._send_next_request()
def _NH_SIPApplicationDidStart(self, notification):
    settings = SIPSimpleSettings()
    local_ip = SIPConfig.local_ip
    log.info('SylkServer started; listening on:')
    for transport in settings.sip.transport_list:
        try:
            log.info('  %s:%d (%s)' % (local_ip, getattr(self.engine, '%s_port' % transport), transport.upper()))
        except TypeError:
            # the port is None when the transport is not enabled, which makes
            # the %d conversion raise TypeError; skip such transports
            pass
def connectionMade(self):
    peer = self.transport.getPeer()
    log.info('Connected to dispatcher at %s:%d' % (peer.host, peer.port))
    if RelayConfig.passport is not None:
        peer_cert = self.transport.getPeerCertificate()
        if not RelayConfig.passport.accept(peer_cert):
            self.transport.loseConnection(CertificateSecurityError('peer certificate not accepted'))
    self._connection_watcher = RecurrentCall(RelayConfig.keepalive_interval, self._send_keepalive)
def start(self):
    log.info('Listening on: %s:%d' % (ServerConfig.address, ServerConfig.root.port))
    log.info('XCAP root: %s' % ServerConfig.root)
    if ServerConfig.root.startswith('https'):
        self._start_https(reactor)
    else:
        reactor.listenTCP(ServerConfig.root.port, HTTPFactory(self.site), interface=ServerConfig.address)
    reactor.run(installSignalHandlers=ServerConfig.backend.installSignalHandlers)
def _CC_finish_init(self, value, req):
    try:
        call = self.factory.application.calls[req.callid]
    except KeyError:
        log.error("Call id %s disappeared before we could finish initializing it" % req.callid)
        req.deferred.callback('Error')
    else:
        if req.call_limit is not None and len(self.factory.application.users.get(call.billingParty, ())) >= req.call_limit:
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('Call limit reached')
        elif call.locked:  ## prepaid account already locked by another call
            log.info("Call id %s of %s to %s forbidden because the account is locked" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('Locked')
        elif call.timelimit == 0:  ## prepaid account with no credit
            log.info("Call id %s of %s to %s forbidden because credit is too low" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('No credit')
        elif req.call_limit is not None or call.timelimit is not None:
            ## call limited by credit value, a global time limit or number of calls
            log.info("User %s can make %s concurrent calls" % (call.billingParty, req.call_limit or "unlimited"))
            self.factory.application.users.setdefault(call.billingParty, []).append(call.callid)
            req.deferred.callback('Limited')
        else:  ## no limit for call
            log.info("Call id %s of %s to %s is postpaid and not limited" % (req.callid, call.user, call.ruri))
            self.factory.application.clean_call(req.callid)
            call.end()
            req.deferred.callback('No limit')
def start(self, roles):
    # needs to be called from a green thread
    log.info('Publishing %s roles to SIPThor' % roles)
    self.node = ThorEntity(SIPConfig.local_ip.normalized, roles, version=sylk.__version__)
    self.networks = {}
    self.presence_message = ThorEvent('Thor.Presence', self.node.id)
    self.shutdown_message = ThorEvent('Thor.Leave', self.node.id)
    credentials = X509Credentials(ThorNodeConfig.certificate, ThorNodeConfig.private_key, [ThorNodeConfig.ca])
    credentials.verify_peer = True
    tls_context = TLSContext(credentials)
    EventServiceClient.__init__(self, ThorNodeConfig.domain, tls_context)
def connectionLost(self, reason=None):
    peer = self.transport.getPeer()
    log.info("Disconnected from Rating Engine at %s:%d" % (peer.host, peer.port))
    self.connected = False
    if self.__request is not None:
        if self.__request.reliable:
            self._request_queue.appendleft(self.__request)
            self.__request = None
        else:
            self._respond("Connection with the Rating Engine is down: %s" % reason, success=False)
    self.factory.application.connectionLost(self.transport.connector, reason, self)
def _start_https(self, reactor):
    from gnutls.interfaces.twisted import X509Credentials
    from gnutls.connection import TLSContext, TLSContextServerOptions
    cert, pkey = TLSConfig.certificate, TLSConfig.private_key
    if cert is None or pkey is None:
        log.critical('The TLS certificate/key could not be loaded')
        sys.exit(1)
    credentials = X509Credentials(cert, pkey)
    tls_context = TLSContext(credentials, server_options=TLSContextServerOptions(certificate_request=None))
    reactor.listenTLS(ServerConfig.root.port, HTTPFactory(self.site), tls_context, interface=ServerConfig.address)
    log.info('TLS started')
def handle_event(self, event):
    # print "Received event: %s" % event
    networks = self.networks
    ## mapping between role names and lists of nodes with that role
    role_map = ThorEntitiesRoleMap(event.message)
    thor_databases = role_map.get('thor_database', [])
    if thor_databases:
        thor_databases.sort(lambda x, y: cmp(x.priority, y.priority) or cmp(x.ip, y.ip))
        dburi = thor_databases[0].dburi
    else:
        dburi = None
    self._database.update_dburi(dburi)
    all_roles = role_map.keys() + networks.keys()
    for role in all_roles:
        try:
            ## avoid setdefault here because it always evaluates the 2nd argument
            network = networks[role]
        except KeyError:
            from thor import network as thor_network
            if role in ["thor_manager", "thor_monitor", "provisioning_server", "media_relay", "thor_database"]:
                continue
            else:
                network = thor_network.new(ThorNodeConfig.multiply)
                networks[role] = network
        new_nodes = set(ThorEntityAddress(node.ip, getattr(node, 'control_port', None), getattr(node, 'version', 'unknown'))
                        for node in role_map.get(role, []))
        old_nodes = set(network.nodes)
        added_nodes = new_nodes - old_nodes
        removed_nodes = old_nodes - new_nodes
        if removed_nodes:
            for node in removed_nodes:
                network.remove_node(node)
                self.control.discard_link((node.ip, node.control_port))
            log.info('Removed %s nodes: %s' % (role, ', '.join(removed_nodes)))
        if added_nodes:
            for node in added_nodes:
                network.add_node(node)
            log.info('Added %s nodes: %s' % (role, ', '.join(added_nodes)))
def wait_for_network(wait_time=10, wait_message=None, test_ip='1.2.3.4'):
    """
    Make sure the network is available and can be reached.

    The function will return as soon as the network is reachable, or it will
    raise RuntimeError if the network is still unreachable after wait_time
    seconds. The default value for test_ip checks if the internet is
    reachable. Optionally it can log wait_message at INFO level if the
    function needs to wait.
    """
    for step in range(wait_time):
        local_ip = host.outgoing_ip_for(test_ip)
        if local_ip is not None:
            break
        elif step == 0 and wait_message is not None:
            log.info(wait_message)
        time.sleep(1)
    else:
        raise RuntimeError('Network is not available after waiting for {} seconds'.format(wait_time))
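# Hedged usage sketch for wait_for_network(): a daemon would typically call it
# once during startup, before binding sockets or contacting remote services.
# The 30-second budget and the message text are illustrative values, not taken
# from the source.
def example_startup():
    import sys
    try:
        wait_for_network(wait_time=30, wait_message='Waiting for the network to become available...')
    except RuntimeError as e:
        log.critical(str(e))
        sys.exit(1)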
def __init__(self):
    main_config_file = process.configuration.file(RadiusConfig.config_file)
    if main_config_file is None:
        raise RuntimeError('Cannot find the radius configuration file: %r' % RadiusConfig.config_file)
    try:
        config = dict(line.rstrip('\n').split(None, 1) for line in open(main_config_file)
                      if len(line.split(None, 1)) == 2 and not line.startswith('#'))
        secrets = dict(line.rstrip('\n').split(None, 1) for line in open(config['servers'])
                       if len(line.split(None, 1)) == 2 and not line.startswith('#'))
        server = config['acctserver']
        try:
            server, acctport = server.split(':')
            acctport = int(acctport)
        except ValueError:
            # no usable port in the acctserver value; fall back to the
            # standard RADIUS accounting port
            log.info('Could not parse the accounting server port, using the default: 1813')
            acctport = 1813
        log.info('Using RADIUS server at %s:%d' % (server, acctport))
        secret = secrets[server]
        log.info("Using RADIUS dictionary file %s" % config['dictionary'])
        dicts = [RadiusDictionaryFile(config['dictionary'])]
        if RadiusConfig.additional_dictionary:
            additional_dictionary = process.configuration.file(RadiusConfig.additional_dictionary)
            if additional_dictionary:
                log.info("Using additional RADIUS dictionary file %s" % RadiusConfig.additional_dictionary)
                dicts.append(RadiusDictionaryFile(additional_dictionary))
            else:
                log.warning('Could not load additional RADIUS dictionary file: %r' % RadiusConfig.additional_dictionary)
        raddict = pyrad.dictionary.Dictionary(*dicts)
        timeout = int(config['radius_timeout'])
        retries = int(config['radius_retries'])
    except Exception:
        log.critical('Cannot read the RADIUS configuration file %s' % RadiusConfig.config_file)
        raise
    pyrad.client.Client.__init__(self, server, 1812, acctport, 3799, secret, raddict)
    self.timeout = timeout
    self.retries = retries
    if 'bindaddr' in config and config['bindaddr'] != '*':
        self.bind((config['bindaddr'], 0))
    EventQueue.__init__(self, self.do_accounting)
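# Hedged sketch of the configuration format the parser above expects: plain
# "key value" pairs separated by whitespace, one per line, with '#' comment
# lines ignored. The values shown are illustrative, not taken from the source.
#
#   acctserver      10.0.0.1:1813
#   servers         /etc/radiusclient/servers
#   dictionary      /etc/radiusclient/dictionary
#   radius_timeout  5
#   radius_retries  3
#   bindaddr        *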
def clientConnectionLost(self, connector, reason):
    self.cancel_delayed()
    # the %%s escape survives the first formatting pass (which fills in the
    # connector attributes), leaving a %s to be filled in with the failure value
    if reason.type != ConnectionDone:
        log.error('Connection with dispatcher at %(host)s:%(port)d was lost: %%s' % connector.__dict__ % reason.value)
    else:
        log.info('Connection with dispatcher at %(host)s:%(port)d was closed' % connector.__dict__)
    if self.parent.connector_needs_reconnect(connector):
        if isinstance(reason.value, CertificateError) or self.connection_lost:
            self.delayed = reactor.callLater(RelayConfig.reconnect_delay, connector.connect)
        else:
            self.delayed = reactor.callLater(min(RelayConfig.reconnect_delay, 1), connector.connect)
        self.connection_lost = True
def start(self):
    interface = WebServerConfig.local_ip
    port = WebServerConfig.local_port
    cert_path = WebServerConfig.certificate.normalized if WebServerConfig.certificate else None
    cert_chain_path = WebServerConfig.certificate_chain.normalized if WebServerConfig.certificate_chain else None
    if cert_path is not None:
        if not os.path.isfile(cert_path):
            log.error('Certificate file %s could not be found' % cert_path)
            return
        try:
            # the certificate file is passed as both key and certificate, so
            # it is expected to be a combined PEM containing both
            ssl_ctx_factory = DefaultOpenSSLContextFactory(cert_path, cert_path)
        except Exception:
            log.exception('Creating TLS context')
            return
        if cert_chain_path is not None:
            if not os.path.isfile(cert_chain_path):
                log.error('Certificate chain file %s could not be found' % cert_chain_path)
                return
            ssl_ctx = ssl_ctx_factory.getContext()
            try:
                ssl_ctx.use_certificate_chain_file(cert_chain_path)
            except Exception:
                log.exception('Setting TLS certificate chain file')
                return
        self.listener = reactor.listenSSL(port, self.site, ssl_ctx_factory, backlog=511, interface=interface)
        scheme = 'https'
    else:
        self.listener = reactor.listenTCP(port, self.site, backlog=511, interface=interface)
        scheme = 'http'
    port = self.listener.getHost().port
    self.__dict__['url'] = '%s://%s:%d' % (scheme, WebServerConfig.hostname or interface.normalized, port)
    log.info('Web server listening for requests on: %s' % self.url)
def end(self, calltime=None, reason=None, sendbye=False):
    if sendbye and self.dialogid is not None:
        ManagementInterface().end_dialog(self.dialogid)
    if self.timer:
        self.timer.cancel()
        self.timer = None
    fullreason = '%s%s' % (self.inprogress and 'disconnected' or 'canceled',
                           reason and (' by %s' % reason) or '')
    if self.inprogress:
        self.endtime = time.time()
        duration = self.endtime - self.starttime
        if calltime:
            ## call did timeout and was ended by external means (like mediaproxy).
            ## we were notified of this and we have the actual call duration in `calltime'
            #self.endtime = self.starttime + calltime
            self.duration = calltime
            log.info("Call from %s to %s was already disconnected (ended or did timeout) after %s seconds (%s)" %
                     (self.user, self.ruri, self.duration, self.callid))
        elif self.expired:
            self.duration = self.timelimit
            if duration > self.timelimit + 10:
                log.warning('Time difference between sending BYEs and actual closing is > 10 seconds')
        else:
            self.duration = duration
    if not self.timelimit:
        self.timelimit = 0
    if self.prepaid and not self.locked and self.timelimit > 0:
        ## even if call was not started we debit 0 seconds anyway to unlock the account
        rating = RatingEngineConnections.getConnection(self)
        rating.debitBalance(self).addCallbacks(callback=self._end_finish,
                                               errback=self._end_error,
                                               callbackArgs=[reason and fullreason or None])
    elif reason is not None:
        log.info("Call from %s to %s %s%s (%s)" %
                 (self.user, self.ruri, fullreason,
                  self.duration and (' after %d seconds' % self.duration) or '',
                  self.callid))
def set_dir(self):
    """
    If the config directory was not specified on the command line, look for
    general.ini in /etc/sylk-pushserver; if general.ini is not there, the
    server will start with default settings.
    """
    dir_info, error = {}, ''
    config_dir = self.config_dir
    log.info(f'Reading configuration from {config_dir}')
    if not os.path.exists(f'{self.config_dir}/{self.cfg_file}'):
        config_dir = ''
        error = f'No {self.cfg_file} found in {self.config_dir}, ' \
                f'server will run with default settings.'
    dir_info['path'], dir_info['error'] = config_dir, error
    return dir_info
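# Hedged usage sketch for set_dir(): the caller is assumed to inspect the
# returned dict and surface the error when the configuration file is missing.
# `config`, standing in for the object that owns set_dir(), is hypothetical.
def example_load_config(config):
    result = config.set_dir()
    if result['error']:
        log.warning(result['error'])
    return result['path']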
def _start_finish_calllimit(self, limit_prepaid):
    (limit, prepaid) = limit_prepaid
    if limit not in (None, 'Locked'):
        delay = limit - self.timelimit
        for callid in self.application.users[self.billingParty]:
            call = self.application.calls[callid]
            if not call.prepaid:
                continue  # only alter prepaid calls
            if call.inprogress:
                call.timelimit += delay
                if call.timer:
                    call.timer.delay(delay)
                log.info("Call from %s to %s %s maximum %d seconds (%s)" %
                         (call.user, call.ruri,
                          (call is self) and 'connected for' or 'previously connected set to',
                          limit, callid))
            elif not call.complete:
                call.timelimit = self.timelimit
                call._setup_timer()
def register_logger(cls, logger):  # this is a class method for convenience
    if inspect.isclass(logger) and issubclass(logger, TraceLogger):
        logger = logger()
    assert isinstance(logger, TraceLogger), 'logger must be a TraceLogger instance or class'
    self = cls()
    if logger in self.loggers:
        return
    self.loggers.add(logger)
    if self.started and logger.enabled:
        logger.start()
        for name in logger.handled_notifications:
            self.notification_map[name].add(logger)
        if self.notification_map:
            notification_center = NotificationCenter()
            notification_center.add_observer(self)
    log.info('TraceLogManager added {logger.name} logger for {logger.owner}'.format(logger=logger))
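# Hedged usage sketch for register_logger(): it accepts either a TraceLogger
# subclass or an instance (classes are instantiated by the manager itself).
# ExampleTraceLogger and its attribute set are hypothetical; the real
# TraceLogger interface may differ.
class ExampleTraceLogger(TraceLogger):
    name = 'example'
    owner = 'server'
    enabled = True
    handled_notifications = ['SIPEngineSIPTrace']  # hypothetical notification name

TraceLogManager.register_logger(ExampleTraceLogger)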
def start(self):
    if self.started:
        return
    self.started = True
    directory = ServerConfig.trace_dir.normalized
    try:
        makedirs(directory)
    except Exception as e:
        log.error('Failed to create tracelog directory at {directory}: {exception!s}'.format(directory=directory, exception=e))
    else:
        for logger in (logger for logger in self.loggers if logger.enabled):
            logger.start()
            for name in logger.handled_notifications:
                self.notification_map[name].add(logger)
        if self.notification_map:
            notification_center = NotificationCenter()
            notification_center.add_observer(self)
            log.info('TraceLogManager started in {} for: {}'.format(
                directory, ', '.join(sorted(logger.name for logger in self.loggers if logger.enabled))))
        else:
            log.info('TraceLogManager started in {}'.format(directory))
def _process(self):
    try:
        req = Request(self.line_buf[0], self.line_buf[1:])
    except InvalidRequestError as e:
        log.info("Invalid OpenSIPS request: %s" % str(e))
        self._send_error_reply(failure.Failure(e))
    else:
        log.debug('Received request from OpenSIPS %s', req)

        def _unknown_handler(req):
            req.deferred.errback(failure.Failure(CommandError(req)))

        try:
            getattr(self, '_CC_%s' % req.cmd, _unknown_handler)(req)
        except Exception as e:
            self._send_error_reply(failure.Failure(e))
        else:
            req.deferred.addCallbacks(callback=self._send_reply, errback=self._send_error_reply)
def _end_finish(self, timelimit_value, reason):
    (timelimit, value) = timelimit_value
    if timelimit is not None and timelimit > 0:
        now = time.time()
        for callid in self.application.users.get(self.billingParty, ()):
            call = self.application.calls[callid]
            if not call.prepaid:
                continue  # only alter prepaid calls
            if call.inprogress:
                call.timelimit = now - call.starttime + timelimit
                if call.timer:
                    log.info("Call from %s to %s previously connected set to %d seconds (%s)" %
                             (call.user, call.ruri, timelimit, callid))
                    call.timer.reset(timelimit)
            elif not call.complete:
                call.timelimit = timelimit
                call._setup_timer()
    # log ended call
    if self.duration > 0:
        log.info("Call from %s to %s %s after %d seconds, call price is %s (%s)" %
                 (self.user, self.ruri, reason, self.duration, value, self.callid))
    elif reason is not None:
        log.info("Call from %s to %s %s (%s)" % (self.user, self.ruri, reason, self.callid))
def got_command(self, dispatcher, command, headers):
    if command == 'summary':
        summary = {'ip': RelayConfig.relay_ip,
                   'version': __version__,
                   'status': self.status,
                   'uptime': int(time() - self.start_time),
                   'session_count': len(self.session_manager.sessions),
                   'stream_count': self.session_manager.stream_count,
                   'bps_relayed': self.session_manager.bps_relayed}
        return cjson.encode(summary)
    elif command == 'sessions':
        return cjson.encode(self.session_manager.statistics)
    elif command == 'update':
        if self.graceful_shutdown or self.shutting_down:
            if not self.session_manager.has_session(**headers):
                log.info('cannot add new session: media-relay is shutting down')
                return 'halting'
        try:
            local_media = self.session_manager.update_session(dispatcher, **headers)
        except RelayPortsExhaustedError:
            log.error('Could not reserve relay ports for session, all allocated ports are being used')
            return 'error'
        if local_media:
            return ' '.join([RelayConfig.advertised_ip or local_media[0][0]] +
                            [str(media[1]) for media in local_media])
    else:  # command == 'remove'
        session = self.session_manager.remove_session(**headers)
        if session is None:
            return 'error'
        else:
            return cjson.encode(session.statistics)
def handle_event(self, event):
    # print "Received event: %s" % event
    networks = self.networks
    # mapping between role names and lists of nodes with that role
    role_map = ThorEntitiesRoleMap(event.message)
    updated = False
    for role in self.node.roles + ('sip_proxy',):
        try:
            network = networks[role]
        except KeyError:
            from thor import network as thor_network
            network = thor_network.new(ThorNodeConfig.multiply)
            networks[role] = network
        new_nodes = set(node.ip for node in role_map.get(role, []))
        old_nodes = set(network.nodes)
        added_nodes = new_nodes - old_nodes
        removed_nodes = old_nodes - new_nodes
        if removed_nodes:
            for node in removed_nodes:
                network.remove_node(node)
            plural = len(removed_nodes) != 1 and 's' or ''
            log.info("removed %s node%s: %s" % (role, plural, ', '.join(removed_nodes)))
            updated = True
        if added_nodes:
            for node in added_nodes:
                network.add_node(node)
            plural = len(added_nodes) != 1 and 's' or ''
            log.info("added %s node%s: %s" % (role, plural, ', '.join(added_nodes)))
            updated = True
    if updated:
        NotificationCenter().post_notification('ThorNetworkGotUpdate', sender=self,
                                               data=NotificationData(networks=self.networks))
def _handle_signal(self, signum, frame):
    if signum == signal.SIGUSR1:
        # toggle debugging
        if log.level.current != log.level.DEBUG:
            log.level.current = log.level.DEBUG
            log.info('Switched logging level to DEBUG')
        else:
            log.info('Switched logging level to {}'.format(RelayConfig.log_level))
            log.level.current = RelayConfig.log_level
    else:
        # terminate program
        signal_map = {signal.SIGTERM: 'Terminated',
                      signal.SIGINT: 'Interrupted',
                      signal.SIGHUP: 'Graceful shutdown'}
        log.info(signal_map.get(signum, 'Received signal {}, exiting.'.format(signum)))
        self.stop(graceful=(signum == signal.SIGHUP))
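# Hedged sketch of how _handle_signal() is assumed to be wired up: the owning
# process registers the handler for the signals the method switches on. This
# is an illustration, not code from the source; it assumes the standard
# library `signal` module is imported at module level, as the handler implies.
def example_install_signal_handlers(self):
    for signum in (signal.SIGUSR1, signal.SIGTERM, signal.SIGINT, signal.SIGHUP):
        signal.signal(signum, self._handle_signal)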
def update_dispatchers(self, dispatchers):
    dispatchers = set(dispatchers)
    for new_dispatcher in dispatchers.difference(self.dispatchers):
        if new_dispatcher in self.old_connectors.iterkeys():
            log.info('Restoring old dispatcher at %s:%d' % new_dispatcher)
            self.dispatcher_connectors[new_dispatcher] = self.old_connectors.pop(new_dispatcher)
        else:
            log.info('Adding new dispatcher at %s:%d' % new_dispatcher)
            dispatcher_addr, dispatcher_port = new_dispatcher
            factory = DispatcherConnectingFactory(self, dispatcher_addr, dispatcher_port)
            self.dispatcher_connectors[new_dispatcher] = reactor.connectTLS(dispatcher_addr, dispatcher_port, factory, self.tls_context)
    for old_dispatcher in self.dispatchers.difference(dispatchers):
        log.info('Removing old dispatcher at %s:%d' % old_dispatcher)
        self.old_connectors[old_dispatcher] = self.dispatcher_connectors.pop(old_dispatcher)
        self._check_disconnect(old_dispatcher)
    self.dispatchers = dispatchers
def __init__(self):
    self.application_registry = ApplicationRegistry()
    self.application_registry.load_applications()
    log.info('Loaded applications: {}'.format(', '.join(sorted(app.__appname__ for app in self.application_registry))))
    if ServerConfig.default_application not in self.application_registry:
        log.warning('Default application "%s" does not exist, falling back to "conference"' % ServerConfig.default_application)
        ServerConfig.default_application = 'conference'
    else:
        log.info('Default application: %s' % ServerConfig.default_application)
    self.application_map = dict(item.split(':') for item in ServerConfig.application_map)
    if self.application_map:
        txt = 'Application map:\n'
        inverted_app_map = defaultdict(list)
        for url, app in self.application_map.iteritems():
            inverted_app_map[app].append(url)
        for app, urls in inverted_app_map.iteritems():
            txt += '  {}: {}\n'.format(app, ', '.join(urls))
        log.info(txt[:-1])
    self.authorization_handler = AuthorizationHandler()
def info(self, message, **context):
    log.info(self.prefix + message, **context)
def pack_fn(filepath, filepath_zip, paths_remap_relbase, all_deps, report, mode):
    """
    'paths_remap_relbase' is the project path; we want all paths to be
    relative to this so we don't get the server path included.
    """
    import os
    from bam.blend import blendfile_pack

    assert os.path.exists(filepath) and not os.path.isdir(filepath)

    log.info("  Source path: %r" % filepath)
    log.info("  Zip path: %r" % filepath_zip)

    deps_remap = {}
    paths_remap = {}
    paths_uuid = {}
    binary_edits = {}

    if filepath.endswith(".blend"):
        # find the path relative to the project's root
        blendfile_src_dir_fakeroot = os.path.dirname(os.path.relpath(filepath, paths_remap_relbase))
        try:
            yield from blendfile_pack.pack(
                    filepath.encode('utf-8'), filepath_zip.encode('utf-8'), mode=mode,
                    paths_remap_relbase=paths_remap_relbase.encode('utf-8'),
                    deps_remap=deps_remap, paths_remap=paths_remap, paths_uuid=paths_uuid,
                    all_deps=all_deps, report=report,
                    blendfile_src_dir_fakeroot=blendfile_src_dir_fakeroot.encode('utf-8'),
                    readonly=True,
                    binary_edits=binary_edits,
                    )
        except Exception:
            log.exception("Error packing the blend file")
            return
    else:
        # non blend-file
        from bam.utils.system import uuid_from_file
        paths_uuid[os.path.basename(filepath)] = uuid_from_file(filepath)
        del uuid_from_file

        import zipfile
        with zipfile.ZipFile(filepath_zip, 'w', zipfile.ZIP_DEFLATED) as zip_handle:
            zip_handle.write(
                    filepath,
                    arcname=os.path.basename(filepath),
                    )
        del zipfile

        # simple case
        paths_remap[os.path.basename(filepath)] = os.path.basename(filepath)

        if os.path.isfile(filepath):
            paths_remap["."] = os.path.relpath(os.path.dirname(filepath), paths_remap_relbase)
        else:
            # TODO(cam) directory support
            paths_remap["."] = os.path.relpath(filepath, paths_remap_relbase)

    # TODO, avoid reopening zipfile
    # append json info to zip
    import zipfile
    with zipfile.ZipFile(filepath_zip, 'a', zipfile.ZIP_DEFLATED) as zip_handle:
        import json

        def write_dict_as_json(f, dct):
            zip_handle.writestr(
                    f,
                    json.dumps(dct,
                               check_circular=False,
                               # optional (pretty)
                               sort_keys=True, indent=4, separators=(',', ': '),
                               ).encode('utf-8'))

        write_dict_as_json(".bam_deps_remap.json", deps_remap)
        write_dict_as_json(".bam_paths_remap.json", paths_remap)
        write_dict_as_json(".bam_paths_uuid.json", paths_uuid)

        import pickle
        zip_handle.writestr(".bam_paths_edit.data", pickle.dumps(binary_edits, pickle.HIGHEST_PROTOCOL))
        del write_dict_as_json
        del binary_edits
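# Hedged usage sketch for pack_fn(): it is a generator (it delegates to
# blendfile_pack.pack() with `yield from`), so a caller has to drain it for
# the packing to actually run. All argument values below are illustrative,
# and the meaning of `report` and `mode` is assumed, not taken from the source.
def example_pack():
    for _status in pack_fn(
            '/projects/film/shots/010.blend', '/tmp/010.zip',
            paths_remap_relbase='/projects/film',
            all_deps=True,
            report=print,  # assumed: a callable that receives progress messages
            mode='ZIP',    # hypothetical mode value
            ):
        pass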
def __trace__(cls, message, *args):
    if cls.__tracing__ == log.level.INFO:
        log.info(message % args)
    elif cls.__tracing__ == log.level.DEBUG:
        log.debug(message % args)