view_func=v.HistoryView.as_view('history')) app.add_url_rule('/<path:repo>/blame/<path:path>', view_func=v.BlobView.as_view('blame')) app.add_url_rule('/<path:repo>/blob/<path:path>', view_func=v.BlobView.as_view('blob')) app.add_url_rule('/<path:repo>/raw/<path:path>', view_func=v.RawView.as_view('raw')) app.add_url_rule('/<path:repo>/patch/<path:ref>/', view_func=v.PatchView.as_view('patch')) app.add_url_rule('/<path:repo>/commit/<path:ref>/', view_func=v.CommitView.as_view('commit')) app.add_url_rule('/<path:repo>/commits/', view_func=v.LogView.as_view('commits')) app.add_url_rule('/<path:repo>/commits/<path:ref>/', view_func=v.LogView.as_view('commits')) app.add_url_rule('/<path:repo>/tags/', view_func=v.TagsView.as_view('tags')) app.add_url_rule('/<path:repo>/snapshot/<path:ref>/<format>/', view_func=v.SnapshotView.as_view('snapshot')) # Logging if not app.debug and app.config['ADMINS']: import logging from logging.handlers import SMTPHandler mail_handler = SMTPHandler('127.0.0.1', app.config['SENDER'], app.config['ADMINS'], "Goblet error") mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if __name__ == '__main__': app.run()
# Wire up the Moment and Babel extensions on the application.
moment = Moment(myApp)
babel = Babel(myApp)

# Production-only logging: mail errors to the admins and keep a rotating
# log file on disk.
if not myApp.debug:
    # Couldn't get the email to send
    if myApp.config["MAIL_SERVER"]:
        auth = None
        if myApp.config["MAIL_USERNAME"] or myApp.config["MAIL_PASSWORD"]:
            auth = (myApp.config["MAIL_USERNAME"],
                    myApp.config["MAIL_PASSWORD"])
        secure = () if myApp.config['MAIL_USE_TLS'] else None
        mail_handler = SMTPHandler(
            mailhost=(myApp.config["MAIL_SERVER"], myApp.config["MAIL_PORT"]),
            fromaddr="caiopasta@" + myApp.config["MAIL_SERVER"],
            toaddrs=myApp.config["ADMINS"],
            subject="Microblog Failure",
            credentials=auth,
            secure=secure,
        )
        # Only ERROR and above should trigger an email.
        mail_handler.setLevel(logging.ERROR)
        myApp.logger.addHandler(mail_handler)

    # Make sure the log directory exists before attaching the file handler.
    if not os.path.exists("logs"):
        os.mkdir("logs")
    file_handler = RotatingFileHandler("logs/microblog.log",
                                       maxBytes=10240, backupCount=10)
    file_handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s: %(message)s [ in %(pathname)s:%(lineno)d]"))
def create_app(settings=None): _log.info("Creating app") app = Flask(__name__, static_folder='frontend/static', template_folder='frontend/templates') app.config.from_object('xssp_api.default_settings') if settings: app.config.update(settings) else: # pragma: no cover app.config.from_envvar('XSSP_API_SETTINGS') # pragma: no cover # Set the maximum content length to 200MB. This is to allow large PDB files # to be sent in post requests. The largest mmCIF file found to date is # 149MB in size. app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 200 # Ignore Flask's built-in logging # app.logger is accessed here so Flask tries to create it app.logger_name = "nowhere" app.logger # Configure logging. # # It is somewhat dubious to get _log from the root package, but I can't see # a better way. Having the email handler configured at the root means all # child loggers inherit it. from xssp_api import _log as xssp_logger # Only log to email during production. if not app.debug and not app.testing: # pragma: no cover mail_handler = SMTPHandler( (app.config["MAIL_SERVER"], app.config["MAIL_SMTP_PORT"]), app.config["MAIL_FROM"], app.config["MAIL_TO"], "xssp-api failed") mail_handler.setLevel(logging.ERROR) xssp_logger.addHandler(mail_handler) mail_handler.setFormatter( logging.Formatter("Message type: %(levelname)s\n" + "Location: %(pathname)s:%(lineno)d\n" + "Module: %(module)s\n" + "Function: %(funcName)s\n" + "Time: %(asctime)s\n" + "Message:\n" + "%(message)s")) # Only log to the console during development and production, but not during # testing. if app.testing: xssp_logger.setLevel(logging.DEBUG) else: ch = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) xssp_logger.addHandler(ch) if app.debug: xssp_logger.setLevel(logging.DEBUG) else: xssp_logger.setLevel(logging.INFO) # Log to file in production as well. Log filename is loaded from # the environment, not from the settings file. maxBytes = 10MB. 
file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) xssp_logger.addHandler(file_handler) # Check if the upload folder exists and create it if it doesn't if not os.path.exists(app.config['UPLOAD_FOLDER']): # pragma: no cover try: os.makedirs(app.config['UPLOAD_FOLDER']) except OSError as ex: _log.error("Error creating upload folder: {}".format(ex)) sys.exit(1) # Check that the process has permission to write in the upload folder try: test_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'test_file') with open(test_filename, 'w') as f: f.write('test') except OSError as ex: # pragma: no cover _log.error("Unable to write to the upload folder '{}': {}".format( app.config['UPLOAD_FOLDER'], ex)) sys.exit(1) finally: if os.path.exists(test_filename): os.remove(test_filename) # Use ProxyFix to correct URL's when redirecting. from xssp_api.middleware import ReverseProxied app.wsgi_app = ReverseProxied(app.wsgi_app) # Initialise extensions from xssp_api import toolbar toolbar.init_app(app) # Register jinja2 filters from xssp_api.frontend.filters import beautify_docstring app.jinja_env.filters['beautify_docstring'] = beautify_docstring # Register blueprints from xssp_api.frontend.api.endpoints import bp as api_bp from xssp_api.frontend.dashboard.views import bp as dashboard_bp app.register_blueprint(api_bp) app.register_blueprint(dashboard_bp) # Database from xssp_api.storage import storage storage.uri = app.config['MONGODB_URI'] storage.db_name = app.config['MONGODB_DB_NAME'] storage.connect() return app
# Database migrations and the login manager.
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = "login"

# Outside debug mode, attach email + rotating-file error logging.
if not app.debug:
    if app.config["MAIL_SERVER"]:
        auth = None
        if app.config["MAIL_USERNAME"] or app.config["MAIL_PASSWORD"]:
            auth = (app.config["MAIL_USERNAME"], app.config["MAIL_PASSWORD"])
        secure = () if app.config["MAIL_USE_TLS"] else None
        mail_handler = SMTPHandler(
            mailhost=(app.config["MAIL_SERVER"], app.config["MAIL_PORT"]),
            fromaddr="no-reply@" + app.config["MAIL_SERVER"],
            toaddrs=app.config["ADMINS"],
            subject="Microblog Failure",
            credentials=auth,
            secure=secure,
        )
        # Email only on ERROR and above.
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    # Create the log directory on first run.
    if not os.path.exists("logs"):
        os.mkdir("logs")
    file_handler = RotatingFileHandler("logs/microblog.log",
                                       maxBytes=10240, backupCount=10)
    file_handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"))
def _create_mail_handler(self, mail_subject): return SMTPHandler("smtp.heig-vd.ch", "*****@*****.**", ["*****@*****.**"], mail_subject)
if user is None: record.user = '******' else: record.user = user.email except: record.user = '******' traceback.print_exc() return True app.logger.addFilter(MailLoggingFilter()) smtp_server = app.config.get("SMTP_SERVER") from_addr = app.config.get("SENDER_ADDR") to_addrs = app.config.get("ADMINS") mail_handler = SMTPHandler(smtp_server, from_addr, to_addrs, "AppComposer Application Error Report") formatter = logging.Formatter( ''' Message type: %(levelname)s Location: %(pathname)s:%(lineno)d Module: %(module)s Function: %(funcName)s Time: %(asctime)s User: %(user)s Message: %(message)s
def create_app(config_class=Config):
    """Application factory: configure extensions, optional services,
    blueprints, and production logging.

    :param config_class: configuration object loaded into ``app.config``.
    :return: the fully initialised :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Core extensions.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)

    # Optional search backend: only connect when a URL is configured.
    app.elasticsearch = Elasticsearch(app.config['ELASTICSEARCH_URL']) \
        if app.config['ELASTICSEARCH_URL'] else None
    # Redis-backed background task queue.
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-task', connection=app.redis)

    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)

    # Blueprints.
    from app.errors import bp as error_bp
    app.register_blueprint(error_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix="/auth")
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix="/api")

    # Production logging: email errors to the admins and keep a rotating
    # log file.  (A bare string literal used to sit here; it was a no-op
    # expression statement, not a docstring, so it is now this comment.)
    if not app.debug:
        if app.config["MAIL_SERVER"]:
            auth = None
            if app.config["MAIL_USERNAME"] or app.config["MAIL_PASSWORD"]:
                auth = (app.config["MAIL_USERNAME"], app.config["MAIL_PASSWORD"])
            secure = None
            if app.config["MAIL_USE_TLS"]:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config["MAIL_SERVER"], app.config["MAIL_PORT"]),
                fromaddr=f'no-reply@{app.config["MAIL_SERVER"]}',
                toaddrs=app.config["ADMINS"],
                subject="Microblog Failure",
                credentials=auth,
                secure=secure,
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if not os.path.exists("logs"):
            os.mkdir("logs")
        # NOTE(review): maxBytes=1024 rotates after only 1 KB, which is
        # unusually small (sibling apps here use 10240) -- confirm intent.
        file_handler = RotatingFileHandler(
            "logs/microblog.log", maxBytes=1024, backupCount=10
        )
        file_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
            )
        )
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info("Microblog Startup")
    return app
@app.context_processor
def inject_categories():
    """Expose the cached blog category list to every template render."""
    from wuvt.blog import list_categories_cached
    return {'categories': list_categories_cached()}


if app.debug:
    # Development: wrap the WSGI app in the interactive debugger.
    from werkzeug.debug import DebuggedApplication
    app.wsgi_app = DebuggedApplication(app.wsgi_app, True)
else:
    # Production: mail errors to the admins, optionally mirror warnings
    # to syslog when an address is configured.
    import logging
    from logging.handlers import SMTPHandler, SysLogHandler

    mail_handler = SMTPHandler(
        app.config['SMTP_SERVER'],
        app.config['MAIL_FROM'],
        app.config['ADMINS'],
        "[{}] Website error".format(app.config['STATION_NAME']))
    mail_handler.setFormatter(logging.Formatter('''
Message type: %(levelname)s
Time: %(asctime)s

%(message)s
'''))
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

    if 'SYSLOG_ADDRESS' in app.config:
        syslog_handler = SysLogHandler(address=app.config['SYSLOG_ADDRESS'])
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
flask.g.user = bunch.Bunch({ "username": "******", "email": "admin@localhost", "admin": True }) if not app.debug: credentials = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) mail_handler = SMTPHandler( (app.config['MAIL_SERVER'], app.config['MAIL_PORT']), 'no-reply@' + app.config['MAIL_SERVER'], app.config['ADMINS'], 'webfaf exception', credentials) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) @app.errorhandler(403) def forbidden(error): return flask.render_template("403.html"), 403 @app.errorhandler(404) def not_found(error): return flask.render_template("404.html"), 404
# Application, configuration, database and OpenID bootstrap.
app = Flask(__name__)
app.config.from_pyfile('defaults.cfg')
app.config.from_pyfile('local.cfg')
db = SQLAlchemy(app)
oid = OpenID(app)

# set up the logging system based on debug settings
if app.debug:
    logging.basicConfig(level=logging.DEBUG)
else:
    # Production: route ERROR records from the root logger to email so
    # every child logger inherits the handler.
    from logging.handlers import SMTPHandler
    mail_handler = SMTPHandler(app.config['MAIL_SERVER'],
                               app.config['ERROR_MAIL_SENDER'],
                               app.config['ADMINS'],
                               app.config['ERROR_MAIL_SUBJECT'])
    mail_handler.setFormatter(logging.Formatter('''\
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s

Message:

%(message)s
'''))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.ERROR)
    root_logger.addHandler(mail_handler)
def create_app(config_class=Config):
    """Application factory: extensions, blueprints, flask-admin, and
    production logging (email + rotating file).

    :param config_class: configuration object loaded into ``app.config``.
    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Extension initialisation.
    db.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)
    user_manager.init_app(app)
    bootstrap.init_app(app)
    fa.init_app(app)
    excel.init_excel(app)

    with app.app_context():
        from app.main import bp as main_bp
        app.register_blueprint(main_bp)

        # flask-admin
        from app.fadmin import bp as admin_bp
        app.register_blueprint(admin_bp)
        from app.fadmin.controller import admin
        app.config['FLASK_ADMIN_SWATCH'] = 'lumen'
        admin.init_app(app)

        # font-awesome config
        app.config.update(
            FONTAWESOME_SERVE_LOCAL=True,
            FONTAWESOME_USE_MINIFIED=True)

        from app.errors import bp as errors_bp
        app.register_blueprint(errors_bp)
        from app.library import bp as library_bp
        app.register_blueprint(library_bp)

    appname = app.config['USER_APP_NAME']

    if not app.debug and not app.testing:
        # Email ERROR-level records to the admins when mail is configured.
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
            secure = () if app.config['MAIL_USE_TLS'] else None
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr=f"no-reply@{app.config['MAIL_SERVER']}",
                toaddrs=app.config['ADMINS'],
                subject=f'{appname} Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # Rotating log file named after the app.
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler(f'logs/{appname}.log',
                                           maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info(f'{appname} startup')

    return app
def create_app(config_name):
    """Application factory keyed by a named configuration.

    :param config_name: key into the ``config`` registry.
    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])

    if not app.config['DEBUG'] and not app.config['TESTING']:
        # configure logging for production
        # email errors to the administrators
        if app.config.get('MAIL_ERROR_RECIPIENT') is not None:
            import logging
            from logging.handlers import SMTPHandler
            credentials = None
            secure = None
            if app.config.get('MAIL_USERNAME') is not None:
                credentials = (app.config['MAIL_USERNAME'],
                               app.config['MAIL_PASSWORD'])
            # NOTE(review): `is not None` is true even when MAIL_USE_TLS
            # is False, so `secure` is always () here -- confirm intent.
            if app.config['MAIL_USE_TLS'] is not None:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr=app.config['MAIL_DEFAULT_SENDER'],
                toaddrs=[app.config['MAIL_ERROR_RECIPIENT']],
                subject='[Talks] Application Error',
                credentials=credentials,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # send standard logs to syslog
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)

    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    # app.config['BOOTSTRAP_SERVE_LOCAL'] = True

    # Extension initialisation.
    bootstrap.init_app(app)
    db.init_app(app)
    moment.init_app(app)
    pagedown.init_app(app)
    mail.init_app(app)
    login_manager.init_app(app)

    # Blueprints.
    from .posts import posts as posts_blueprint
    app.register_blueprint(posts_blueprint)
    from .tags import tag as tag_blueprint
    app.register_blueprint(tag_blueprint)
    from .messages import message as message_blueprint
    app.register_blueprint(message_blueprint)
    from .comments import comment as comment_blueprint
    app.register_blueprint(comment_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    from .api_1_0 import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/1.0')

    with app.app_context():
        db.create_all()

    # from app.emails import start_email_thread
    # @app.before_first_request
    # def before_first_request():
    #     start_email_thread()

    return app
# To generate a self-signed certificate:
#   openssl genrsa 1024 > ssl.key
#   openssl req -new -x509 -nodes -sha1 -days 365 -key ssl.key > ssl.cert
# from OpenSSL import SSL
# ctx = SSL.Context(SSL.SSLv23_METHOD)
# ctx.use_privatekey_file('ssl.key')
# ctx.use_certificate_file('ssl.cert')

app = Flask(__name__)

# Outside debug mode, email unhandled errors to the admins listed in the
# bundled 'config' resource.
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler
    f = app.open_resource('config')
    cfg = Config(f)
    mail_handler = SMTPHandler(cfg.server, cfg.email, cfg.ADMINS, cfg.subject)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)


@app.route('/')
def pagina_teste():
    """Serve the example template, or a 403 page when it cannot be read."""
    try:
        with app.open_resource('templates/exemplo.html') as f:
            conteudo = f.read().decode('utf-8')
    except IOError:
        return make_response("<h1>403 Forbidden</h1>", 403)
    resposta = make_response(conteudo)
    resposta.headers['Content-type'] = 'text/html; charset=utf-8'
    return resposta
def create_app(config_class=Config):
    """Application factory.

    Builds and returns a new Flask instance (kept as a local variable
    rather than a module global).

    :param config_class: configuration object loaded into ``app.config``.
    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Initialise the extensions.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)

    # Blueprints: error handling, authentication, then the main app.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    # Optional Elasticsearch client attribute.
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None

    if not app.debug and not app.testing:
        # Email ERROR records to the admins.
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            # NOTE: this app keys the secure flag off MAIL_USE_SSL,
            # not MAIL_USE_TLS as sibling apps do.
            secure = () if app.config['MAIL_USE_SSL'] else None
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='Microblog Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # Also record INFO and above to a rotating log file.
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/microblog.log',
                                           maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
mail = json.load(f) logger = logging.getLogger('J4J_Orchestrator') # In trace will be sensitive information like tokens logging.addLevelName(9, "TRACE") def trace_func(self, message, *args, **kws): if self.isEnabledFor(9): # Yes, logger takes its '*args' as 'args'. self._log(9, message, args, **kws) logging.Logger.trace = trace_func mail_handler = SMTPHandler(mailhost='mail.fz-juelich.de', fromaddr='*****@*****.**', toaddrs=mail.get('receiver'), subject='J4J_Orchestrator Error') mail_handler.setLevel(logging.ERROR) mail_handler.setFormatter( logging.Formatter( '[%(asctime)s] %(levelname)s in %(filename)s ( Line=%(lineno)d ): %(message)s' )) # Override logging.config.file_config, so that the logfilename will be send to the parser, each time the logging.conf will be updated def j4j_file_config(fname, defaults=None, disable_existing_loggers=True): if not defaults: defaults = { 'logfilename': '/etc/j4j/j4j_mount/j4j_orchestrator/logs/{}_{}_o.log'.format( socket.gethostname(), os.getpid())
import os import lockfile import logging from logging.handlers import SMTPHandler logger = logging.getLogger('rascandae') logger.setLevel(logging.DEBUG) fh = logging.FileHandler('s3upload.log') formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) logger.addHandler(fh) sh = SMTPHandler(SMTP_SERVER, fromaddr='*****@*****.**', toaddrs=REPORT_EMAIL, subject='Feed tranform report') sh.setLevel(logging.WARNING) def sorted_ls(path): mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime return list(sorted(os.listdir(path), key=mtime)) def check_if_idle(): """ Should check if there is no shot processing currently taking place. Should check if last shot was taken at least 20 minutes ago. By checking ctime of PICTURES folder (IMPORTANT this will break on WINDOWS as ctime would mean creation time)
def create_app():
    """ Set up the Flask app, cache, read app configuration file, and other things. """
    import conf as configs
    from morpholex import MorphoLexicon

    # TODO: this is called twice sometimes, slowdowns have been reduced,
    # but don't know why yet. Need to check. It only happens on the
    # first POST lookup, however...

    with open(os.environ['NDS_CONFIG'], 'r') as F:
        static_prefix = yaml.load(F).get('ApplicationSettings').get('fcgi_script_path', '')

    os.environ['PATH'] += os.pathsep + os.path.join(os.path.dirname(__file__), 'node_modules/.bin')

    app = Flask(__name__,
                static_url_path=static_prefix + '/static',
                template_folder=cwd('templates'))
    app = jinja_options_and_filters(app)
    app.production = False

    DEFAULT_CONF = os.path.join(os.path.dirname(__file__), 'configs')
    app.config['cache'] = cache
    # TODO: make sure this isn't being specified by an env variable
    app.config['NDS_CONFDIR'] = os.environ.get('NDS_CONFDIR', DEFAULT_CONF)
    app.config['jinja_env'] = app.jinja_env
    app.config = Config('.', defaults=app.config)
    app.config.from_envvar('NDS_CONFIG')
    app.config.overrides = configs.blueprint.load_language_overrides(app)
    app.config.prepare_lexica()
    app.config.add_optional_routes()
    os.environ['NDS_PATH_PREFIX'] = app.config.fcgi_script_path
    app.static_url_path = app.config.fcgi_script_path + app.static_url_path

    # Prepare assets before custom templates are read
    app = prepare_assets(app)

    # Register rate limiter
    limiter = Limiter(app, global_limits=["120/minute"])
    app.limiter = limiter

    app.config['APPLICATION_ROOT'] = app.config.fcgi_script_path

    # Register language specific config information
    import views
    app.register_blueprint(views.blueprint, url_prefix=app.config['APPLICATION_ROOT'])
    app.register_blueprint(configs.blueprint, url_prefix=app.config['APPLICATION_ROOT'])

    # Prepare cache
    cache_path = os.path.join(os.path.dirname(__file__),
                              'tmp/generator_cache/%s/' % app.config.short_name)
    cache.init_app(app, {'CACHE_TYPE': 'filesystem', 'CACHE_DIR': cache_path})
    app.cache = cache
    with app.app_context():
        app.cache.clear()
    app.config['cache'] = cache

    app.morpholexicon = MorphoLexicon(app.config)
    from paradigms import ParadigmConfig
    pc = ParadigmConfig(app)
    app.morpholexicon.paradigms = pc

    ## Read and prepare the templates
    from entry_template_filters import register_template_filters
    from entry_templates import TemplateConfig
    app = register_template_filters(app)
    app.lexicon_templates = TemplateConfig(app, debug=True)

    try:
        with open('secret_key.do.not.check.in', 'r') as F:
            key = F.readlines()[0].strip()
        app.config['SECRET_KEY'] = key
    except IOError:
        print >> sys.stderr, """
You need to generate a secret key, and store it in a file with the
following name: secret_key.do.not.check.in
"""
        sys.exit()

    app = register_babel(app)

    if not app.debug:
        import logging
        from logging.handlers import SMTPHandler
        mail_handler = SMTPHandler('127.0.0.1', '*****@*****.**', ADMINS,
                                   'NDS error')
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    from logging import FileHandler
    from logging.handlers import SMTPHandler
    from socket import gethostname
    if app.debug:
        mail_handler = FileHandler('debug_email_log.txt')
    else:
        # BUG FIX: _admins (project ADMINS plus per-language admins) was
        # computed but never used -- the handler was sent to ADMINS only.
        _admins = ADMINS + app.config.admins
        mail_handler = SMTPHandler('127.0.0.1',
                                   "server-error@%s" % gethostname(),
                                   _admins,
                                   "NDS-%s Failed" % app.config.short_name)
    app.logger.smtp_handler = mail_handler

    # Templates are read, register the assets
    register_assets(app)

    # NOTE(review): `logging` is only imported in the `if not app.debug`
    # branch above; this line relies on a module-level import -- confirm.
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

    return app
def mysql_backup(args):
    """Dump each requested MySQL database, gzip it, prune old backups,
    and hard-link the newest dump into a "Current" directory.

    :param args: argparse namespace with secretfile, verbose, adminemail,
        testlog, databases, backupdir and keepdays attributes.
    :return: 0 on success, -1 on a fatal setup error.

    BUG FIX: several log/format strings used the invalid printf
    placeholder ``%,`` (``"database=%,"``), which raises
    ``ValueError: unsupported format character`` at runtime; they now
    use ``%s``.  The chmod failure message also said "700" while the
    mode applied is 0o600.
    """
    # Load credentials/settings from the JSON secrets file.
    try:
        with open(args.secretfile) as f:
            SECRETS = json.loads(f.read())
    except FileNotFoundError as e:
        if args.verbose:
            sys.stderr.write(e.strerror + ":\n")
            sys.stderr.write(args.secretfile + "\n")
        else:
            logger.error("Secretfile %s not found" % args.secretfile)
        return -1

    def get_secret(setting, secrets=SECRETS):
        """ get the secret setting or return explicit exception """
        try:
            return secrets[setting]
        except KeyError:
            error_msg = "Set the {0} environment variable in the secret file".format(
                setting)
            raise ImproperlyConfigured(error_msg)

    emailSubject = "Problem with database backup!!!"
    # Attach an SMTP logging handler when email settings are present;
    # email logging is optional, so a missing key just skips it.
    try:
        emailHost = get_secret("EMAIL_HOST")
        emailUser = get_secret("EMAIL_USER")
        emailPort = get_secret("EMAIL_PORT")
        emailUseTLS = get_secret("EMAIL_USE_TLS")
        emailPassword = get_secret("EMAIL_PASS")
        emailFromUser = get_secret("EMAIL_FROM_USER")
        if args.adminemail == "":
            if args.verbose:
                sys.stdout.write(
                    "No admin email specified using --email argument, no email logging enabled.\n"
                )
            else:
                logger.info(
                    "No admin email specified using --email argument, no email logging enabled."
                )
        else:
            isSecure = None
            if emailUseTLS == "True":
                isSecure = ()
            smtpHandler = SMTPHandler((emailHost, emailPort),
                                      emailFromUser,
                                      args.adminemail,
                                      emailSubject,
                                      credentials=(emailUser, emailPassword,),
                                      secure=isSecure)
            smtpHandler.setLevel(logging.ERROR)
            logger.addHandler(smtpHandler)
    except ImproperlyConfigured:
        pass

    if args.testlog:
        logger.info("Test of logging capabilities for info messages")
        logger.error("Test of logging capabilities for error messages")
    else:
        for database in args.databases:
            backuproot = os.path.join(args.backupdir, database)
            backupfile = "%s.%s.sql" % (backuproot, datetime.now().isoformat())
            user = get_secret(database.upper() + "_DB_USER")
            password = get_secret(database.upper() + "_DB_PASS")
            if args.verbose:
                dumpcommand = "mysqldump -u %s -p %s" % (user, database)
                sys.stdout.write(
                    "Backing up and gzipping %s database to %s.gz\n" %
                    (database, backupfile))
                sys.stdout.write("using command: %s\n" % dumpcommand)
            # Dump the database; skip this database on failure.
            with open(backupfile, "wb", 0) as out:
                try:
                    run(["mysqldump", "-u", user, "-p" + password, database],
                        stderr=PIPE, stdout=out, check=True)
                except CalledProcessError as e:
                    if args.verbose:
                        sys.stdout.write("database=%s Error='%s'\n" %
                                         (database, e.stderr.decode()))
                    else:
                        logger.error("database=%s Error='%s'" %
                                     (database, e.stderr.decode()))
                    continue
            # Compress the dump in place.
            try:
                run(["gzip", backupfile], stderr=PIPE, check=True)
            except CalledProcessError as e:
                if args.verbose:
                    sys.stdout.write("unable to gzip file=%s Error='%s'\n" %
                                     (backupfile, e.stderr.decode()))
                else:
                    logger.error("unable to gzip file=%s Error='%s'" %
                                 (backupfile, e.stderr.decode()))
                continue
            # Restrict the backup to the owning user.
            try:
                os.chmod(backupfile + ".gz", 0o600, follow_symlinks=True)
            except OSError:
                if args.verbose:
                    sys.stdout.write(
                        "database=%s, Unable to chmod 600 on file %s\n" %
                        (database, backupfile))
                else:
                    logger.error(
                        "database=%s, Unable to chmod 600 on file %s" %
                        (database, backupfile))
            if args.verbose:
                sys.stdout.write(
                    "backed up and gzipped %s file size=%s\n" %
                    (database, os.path.getsize(backupfile + ".gz")))
            else:
                logger.info("backed up and gzipped %s file size=%s" %
                            (database, os.path.getsize(backupfile + ".gz")))

            # remove files older than keepdays days old
            if args.keepdays >= 0:
                # 1 second fudge factor so we don't delete a just created
                # file if keepdays = 0
                now = time.time() - 1
                for file in glob.glob(
                        os.path.join(args.backupdir, database + '*')):
                    if os.path.isfile(file):
                        mtime = os.path.getmtime(file)
                        if now - mtime >= args.keepdays * 86400:
                            # remove old files
                            if DEBUG:
                                sys.stdout.write("Deleting %s\n" % file)
                            os.remove(file)

            # now copy the currently created file "backupfile" into the
            # "Current" directory under the backupdir directory, creating
            # the "Current" directory if necessary and deleting old files
            # with the same root name
            pathToCurrent = os.path.join(args.backupdir, "Current")
            if not os.path.exists(pathToCurrent):
                # create Current dir
                try:
                    os.mkdir(pathToCurrent)
                except OSError:
                    if args.verbose:
                        sys.stdout.write("unable to create %s directory\n" %
                                         pathToCurrent)
                    else:
                        logger.error("unable to create %s directory\n" %
                                     pathToCurrent)
                    return -1
            else:
                if not os.path.isdir(pathToCurrent):
                    if args.verbose:
                        sys.stdout.write(
                            "unable to create %s directory it already exists as a file\n"
                            % pathToCurrent)
                    else:
                        logger.error(
                            "unable to create %s directory it already exists as a file\n"
                            % pathToCurrent)
                    return -1
                # delete links with same root name before re-linking
                for file in glob.glob(
                        os.path.join(pathToCurrent, "%s*" % database)):
                    if os.path.isfile(file):
                        try:
                            os.remove(file)
                        except OSError:
                            if args.verbose:
                                sys.stdout.write(
                                    "database=%s, Unable to remove current backup file %s\n"
                                    % (database, file))
                            else:
                                logger.error(
                                    "database=%s, Unable to remove current backup file %s"
                                    % (database, file))
                            continue
            # create link to current backupfile in "Current" (done for
            # both a pre-existing and a freshly created directory)
            try:
                os.link(
                    backupfile + ".gz",
                    os.path.join(pathToCurrent,
                                 os.path.basename(backupfile) + ".gz"))
            except OSError:
                if args.verbose:
                    sys.stdout.write(
                        "database=%s, Unable to create link to current backup file %s\n"
                        % (database,
                           os.path.join(pathToCurrent,
                                        os.path.basename(backupfile) + ".gz")))
                else:
                    logger.error(
                        "database=%s, Unable to create link to current backup file %s"
                        % (database,
                           os.path.join(pathToCurrent,
                                        os.path.basename(backupfile) + ".gz")))
    return 0
def create_app():
    """Application factory: pick a config from FLASK_ENV, initialise the
    extensions and scheduler, register blueprints, and wire up
    production logging.

    :return: the configured :class:`flask.Flask` application.
    """
    app = Flask(__name__)

    # Select configuration by environment name.  FLASK_ENV must be set;
    # an unrecognised value leaves config as None.
    config = None
    if os.environ["FLASK_ENV"] == "production":
        config = ProductionConfig()
    elif os.environ["FLASK_ENV"] == "development":
        config = DevelopmentConfig()
        print("THIS APP IS IN DEV CONFIGURATION. DO NOT USE IN PRODUCTION.")
    elif os.environ["FLASK_ENV"] == "test":
        config = TestConfig()
        print("THIS APP IS IN TEST CONFIGURATION. DO NOT USE IN PRODUCTION.")
    elif config is None:  # fixed: was `config == None`; identity test is correct for None
        print("NO CONFIGURATION SET.")
    app.config.from_object(config)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)

    from app.scheduler_tasks.check_studies import check_studies

    # The cron scheduler is skipped under test to keep runs deterministic.
    if os.environ["FLASK_ENV"] != "test":
        scheduler.api_enabled = True
        scheduler.init_app(app)
        scheduler.add_job(
            id="check_studies_job",
            trigger="cron",
            func=check_studies,
            hour="*",
            minute=5,
            args=[app],
        )
        scheduler.start()
        # Stop the scheduler cleanly on interpreter exit.
        atexit.register(lambda: scheduler.shutdown(wait=False))

    csrf.init_app(app)
    jwt.init_app(app)

    # Blueprints.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    app.register_blueprint(bp, url_prefix="/")
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix="/auth")
    from app.admin import bp as admin_bp
    app.register_blueprint(admin_bp, url_prefix="/admin")
    from app.study import bp as study_bp
    app.register_blueprint(study_bp, url_prefix="/study")
    from app.responses import bp as responses_bp
    app.register_blueprint(responses_bp, url_prefix="/responses")
    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix="/api")

    # from https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-vii-error-handling
    if not app.debug and not app.testing:
        # Email ERROR records to the admins when mail is configured.
        if app.config["MAIL_SERVER"]:
            auth = None
            if app.config["MAIL_USERNAME"] or app.config["MAIL_PASSWORD"]:
                auth = (
                    app.config["MAIL_USERNAME"],
                    app.config["MAIL_PASSWORD"],
                )
            mail_handler = SMTPHandler(
                mailhost=(app.config["MAIL_SERVER"], app.config["MAIL_PORT"]),
                fromaddr=app.config["MAIL_USERNAME"],
                toaddrs=app.config["ADMIN_EMAILS"],
                subject="User Study Error",
                credentials=auth,
                secure=() if app.config["MAIL_USE_TLS"] else None,
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # Also keep an INFO-level rotating log file.
        if not os.path.exists("logs"):
            os.mkdir("logs")
        file_handler = RotatingFileHandler(
            "logs/userstudy.log", maxBytes=10240, backupCount=10
        )
        file_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
            )
        )
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info("User Study startup")

    return app
# set Flask WTF CSRFProtect
csrf = CSRFProtect(app)

# email logged errors
if not app.debug:
    logger = logging.getLogger(__name__)
    if app.config['MAIL_SERVER']:
        auth = None
        if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
            auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
        secure = () if app.config['MAIL_USE_TLS'] else None
        mail_handler = SMTPHandler(
            mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
            fromaddr=app.config['MAIL_DEFAULT_SENDER'],
            toaddrs=app.config['ADMINS'],
            subject='Stadsgids error',
            credentials=auth,
            secure=secure)
        # NOTE(review): DEBUG-level mail sends an email for every debug
        # record, which contradicts the "email logged errors" comment --
        # confirm whether logging.ERROR was intended.
        mail_handler.setLevel(logging.DEBUG)
        logger.addHandler(mail_handler)

# check if running on development server
if os.getenv("PRODUCTION_SERVER") == "True":
    # set session cookie secure
    app.config["SESSION_COOKIE_SECURE"] = True

# import worker
from runworker import conn

# set worker Queue
queue = rq.Queue('default', connection=conn)
ENV = os.environ.get('ENV', 'development').lower() logger = logging.getLogger(__name__) stdout_handler = logging.StreamHandler() logger.addHandler(stdout_handler) if not ENV == 'development': logger.setLevel(logging.INFO) # Add opbeat opbeat_handler = OpbeatHandler(opbeat_client) opbeat_handler.setLevel(logging.WARN) logger.addHandler(opbeat_handler) # Add email from logging.handlers import SMTPHandler smtp_handler = SMTPHandler( mailhost=('smtp.mailgun.org', 587), fromaddr='Application Bug Reporter <*****@*****.**>', toaddrs=['*****@*****.**'], subject='Electricity Map Feeder Error', credentials=(os.environ.get('MAILGUN_USER'), os.environ.get('MAILGUN_PASSWORD'))) smtp_handler.setLevel(logging.WARN) logger.addHandler(smtp_handler) # Add statsd logging.getLogger('statsd').addHandler(stdout_handler) else: logger.setLevel(logging.DEBUG) logger.info('Feeder is starting..') # Define all production parsers CONSUMPTION_PARSERS = { 'AT': ENTSOE.fetch_consumption,
def create_app(config_class=Config):
    """Application factory: configure the Flask app, its extensions,
    blueprints and production logging, then return it.

    Also publishes *config_class* as the module-level ``config`` global.
    """
    global config
    config = config_class

    app = Flask(__name__)

    # Flask, SQLAlchemy and Flask-Mail settings taken from the config object.
    app.config.update(
        SECRET_KEY=config.secretKey,
        SQLALCHEMY_DATABASE_URI=config.sqlAlchemyDatabaseUri,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        MAIL_SERVER=config.mailServer,
        MAIL_PORT=config.mailPort,
        MAIL_USE_TLS=config.mailUseTls,
        MAIL_USERNAME=config.mailUsername,
        MAIL_PASSWORD=config.mailPassword,
    )
    app.testing = config.testing

    # Bind every extension to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    for extension in (login, mail, bootstrap, moment, babel):
        extension.init_app(app)

    # Imported here (not at module top) to avoid circular imports.
    from app import models
    from app.auth import routes
    from app.auth import bp as auth_bp
    from app.errors import bp as errors_bp
    from app.main import bp as main_bp

    app.register_blueprint(errors_bp)
    app.register_blueprint(auth_bp, url_prefix='/auth')
    app.register_blueprint(main_bp)

    # Production logging: errors by email plus a rotating log file.
    if not app.debug and not app.testing and config.mailServer:
        creds = None
        if config.mailUsername or config.mailPassword:
            creds = (config.mailUsername, config.mailPassword)
        tls = () if config.mailUseTls else None
        mail_handler = SMTPHandler(
            mailhost=(config.mailServer, config.mailPort),
            fromaddr='no-reply@' + config.mailServer,
            toaddrs=config.admins,
            subject='Microblog Failure',
            credentials=creds,
            secure=tls,
        )
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler(
            'logs/microblog.log', maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
oauth_github = oauth.remote_app('github', **app.config['GITHUB_OAUTH']) # QQ oauth_qq = oauth.remote_app('qq', **app.config['QQ_OAUTH']) # WeiBo oauth_weibo = oauth.remote_app('weibo', **app.config['WEIBO_OAUTH']) # Google # 要银子,妹的 # 配置日志 dictConfig(app.config['LOG_CONFIG']) if not app.config['DEBUG']: import logging from logging.handlers import SMTPHandler credentials = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) mail_handler = SMTPHandler( (app.config['MAIL_SERVER'], app.config['MAIL_PORT']), app.config['MAIL_DEFAULT_SENDER'][1], app.config['ADMINS'], 'App Error Message', credentials) mail_handler.setLevel(logging.DEBUG) app.logger.addHandler(mail_handler) # 这个 import 语句放在这里, 防止views, models import发生循环import from app import views, models, tasks
# Core extensions: ORM, login-session management and OpenID sign-in.
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
oid = OpenID(app, os.path.join(basedir, 'tmp'))

# Imported late so that views/models can import `app`, `db`, `lm` and
# `oid` from this module without a circular import.
from app import views, models

if not app.debug:
    # Production-only logging: the two previously duplicated
    # `if not app.debug:` blocks (with a duplicate `import logging`)
    # are merged into one; behavior is unchanged.
    import logging
    from logging.handlers import SMTPHandler, RotatingFileHandler

    # Email ERROR-and-above records to the admins.
    # (Fixed: local variable was misspelled "credentails".)
    credentials = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    # NOTE(review): MAIL_POST looks like a typo for MAIL_PORT -- confirm
    # against the config module before renaming, since the config may
    # itself define MAIL_POST.
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_POST),
                               'no-reply@' + MAIL_SERVER,
                               ADMINS, 'microblog failure', credentials)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

    # Rotating file log: 1 MiB per file, 10 backups, INFO and above.
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a',
                                       1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('microblog startup')
def create_app(config_class=Config):
    """Build the StockXBot Flask application: extensions, CSP headers,
    an optional Elasticsearch client, blueprints and logging."""
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    mail.init_app(app)

    # Content-Security-Policy: our own origin, inline assets and the CDNs
    # that serve Bootstrap/jQuery.
    talisman.init_app(app, content_security_policy={
        'default-src': [
            '\'self\'',
            '\'unsafe-inline\'',
            'stackpath.bootstrapcdn.com',
            'code.jquery.com',
            'cdn.jsdelivr.net'
        ]
    })

    # Elasticsearch is optional; only create a client when a URL is set.
    if app.config['ELASTICSEARCH_URL']:
        app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']])
    else:
        app.elasticsearch = None

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.telebot import bp as telebot_bp
    app.register_blueprint(telebot_bp)

    if not app.debug and not app.testing:
        # Production logging, step 1: email errors to the admins.
        if app.config['MAIL_SERVER']:
            credentials = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                credentials = (app.config['MAIL_USERNAME'],
                               app.config['MAIL_PASSWORD'])
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='StockXBot Failure',
                credentials=credentials,
                secure=() if app.config['MAIL_USE_TLS'] else None,
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # Step 2: INFO records go to stdout or to a rotating log file.
        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler(
                'logs/stockxbot.log', maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('StockXBot startup')

    return app
def create_app(config=None):
    """Application factory for aleph.

    Configuration is layered: package defaults, then the file named by the
    ALEPH_SETTINGS environment variable (if set), then the *config* mapping
    passed in, which overrides everything else.

    :param config: optional mapping of override settings.  Defaults to
        ``None`` (fixed from a shared mutable ``{}`` default).
    :returns: the fully configured Flask application.
    :raises RuntimeError: if the configured database is not PostgreSQL.
    """
    app = Flask('aleph')
    app.config.from_object(default_settings)
    app.config.from_envvar('ALEPH_SETTINGS', silent=True)
    app.config.update(config or {})
    app_name = app.config.get('APP_NAME')

    if app.config.get("TESTING"):
        # The testing configuration is inferred from the production
        # settings, but it can only be derived after the config files
        # have actually been evaluated.
        database_uri = app.config.get('SQLALCHEMY_DATABASE_URI')
        app.config['SQLALCHEMY_DATABASE_URI'] = database_uri + '_test'
        es_index = app.config.get('ELASTICSEARCH_INDEX',
                                  app.config.get('APP_NAME'))
        app.config['ELASTICSEARCH_INDEX'] = es_index + '_test'

    # Outside of debug mode, email crash reports to the admins.
    if not app.debug and app.config.get('MAIL_ADMINS'):
        credentials = (app.config.get('MAIL_USERNAME'),
                       app.config.get('MAIL_PASSWORD'))
        mail_handler = SMTPHandler(app.config.get('MAIL_SERVER'),
                                   app.config.get('MAIL_FROM'),
                                   app.config.get('MAIL_ADMINS'),
                                   '[%s] Crash report' % app_name,
                                   credentials=credentials,
                                   secure=())
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    if 'postgres' not in app.config.get('SQLALCHEMY_DATABASE_URI', ''):
        raise RuntimeError("aleph database must be PostgreSQL!")

    queues = (
        Queue(WORKER_QUEUE, routing_key=WORKER_ROUTING_KEY),
        Queue(USER_QUEUE, routing_key=USER_ROUTING_KEY),
    )
    celery.conf.update(
        # Fixed: `('aleph.queues')` was a plain string (missing trailing
        # comma); this is now the intended one-element tuple.
        imports=('aleph.queues',),
        broker_url=app.config['CELERY_BROKER_URL'],
        task_always_eager=app.config['CELERY_ALWAYS_EAGER'],
        task_eager_propagates=True,
        task_ignore_result=True,
        result_persistent=False,
        task_queues=queues,
        task_default_queue=WORKER_QUEUE,
        task_default_routing_key=WORKER_ROUTING_KEY,
        # ultra-high time limit to shoot hung tasks:
        task_time_limit=3600 * 3,
        worker_max_tasks_per_child=500,
        worker_disable_rate_limits=True,
        # worker_hijack_root_logger=False,
        beat_schedule=app.config['CELERYBEAT_SCHEDULE'],
    )
    celery.conf.update(app.config.get('CELERY', {}))

    migrate.init_app(app, db, directory=app.config.get('ALEMBIC_DIR'))
    configure_oauth(app)
    mail.init_app(app)
    db.init_app(app)

    # LDAP is optional; a failure to initialize is logged, not fatal.
    try:
        ldap.init_app(app)
    except LDAPException as error:
        log.info(error)

    # This executes all registered init-time plugins so that other
    # applications can register their behaviour.
    for plugin in get_init():
        plugin(app=app)
    return app
def create_app(config_class=Config):
    """Application factory for Microblog: wires up extensions, the
    optional Elasticsearch client, the Redis/RQ task queue, blueprints
    and production logging."""
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    for extension in (login, mail, bootstrap, moment, babel):
        extension.init_app(app)

    # Full-text search is optional: no client when ELASTICSEARCH_URL unset.
    if app.config['ELASTICSEARCH_URL']:
        app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']])
    else:
        app.elasticsearch = None

    # Redis connection plus the RQ queue used for background tasks.
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    if not app.debug and not app.testing:
        # Email unhandled errors to the admins when SMTP is configured.
        if app.config['MAIL_SERVER']:
            credentials = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                credentials = (app.config['MAIL_USERNAME'],
                               app.config['MAIL_PASSWORD'])
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr=app.config['MAIL_USERNAME'],
                toaddrs=app.config['ADMINS'],
                subject='Microblog Failure',
                credentials=credentials,
                secure=() if app.config['MAIL_USE_TLS'] else None,
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # INFO records go to stdout (e.g. on a PaaS) or a rotating file.
        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler(
                'logs/microblog.log', maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
# Default config path; points at the example config shipped in dist/.
os.environ['TARANISLOGO_CFG'] = "../dist/pytaranislogo.cfg.example"

# Load configuration from the file named by TARANISLOGO_CFG; abort the
# process when it cannot be read.
try:
    app.config.from_envvar('TARANISLOGO_CFG', silent=False)
except RuntimeError as e:
    app.logger.error(e)
    sys.exit(2)

with app.test_request_context():
    if app.debug:
        app.logger.setLevel(logging.DEBUG)
    else:
        # Production: INFO-level logging plus emailed error reports.
        app.logger.setLevel(logging.INFO)
        from logging.handlers import SMTPHandler
        mail_handler = SMTPHandler(app.config['EMAILSERVER'],
                                   app.config['EMAILFROM'],
                                   app.config['ADMINS'],
                                   current_app.name + ' failed!')
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)

    # Fall back to a random secret key when none is configured; as the
    # warning says, this invalidates existing cookies and will not work
    # across multiple WSGI processes.
    if not len(app.config['APPSECRET']):
        app.logger.warning(
            "[System] Generating random secret_key. All older cookies will be invalid, but i will NOT work with multiple processes (WSGI)."
        )
        app.secret_key = os.urandom(24)
    else:
        app.secret_key = app.config['APPSECRET']


# helpers
def getInstanceSettings():
except: article_ids = None login.login_view = 'login' if not app.debug: if app.config['MAIL_SERVER']: auth = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) secure = None if app.config['MAIL_USE_TLS']: secure = () mail_handler = SMTPHandler( mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']), fromaddr='no-reply@' + app.config['MAIL_SERVER'], toaddrs=app.config['ADMINS'], subject='3bij3 Failure', credentials=auth, secure=secure) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if not os.path.exists('logs'): os.mkdir('logs') file_handler = RotatingFileHandler('logs/3bij3.log', maxBytes=10240, backupCount=10) file_handler.setFormatter( logging.Formatter( '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]' )) file_handler.setLevel(logging.INFO)
# Verbose logging for the SOAP (suds) and HTTP (requests) client libraries.
logging.getLogger('suds.client').setLevel(logging.DEBUG)
logging.getLogger("requests").setLevel(logging.DEBUG)

LOG_FORMAT = (
    "%(asctime)s [%(levelname)s]: %(message)s in %(pathname)s:%(lineno)d")

ilogger = logging.getLogger("api")
ilogger.setLevel(logging.DEBUG)

# The info handler writes to a file when ESPA_LOG_DIR is set (and stdout
# logging is not forced via ESPA_LOG_STDOUT), otherwise to stdout.
espa_log_dir = os.getenv('ESPA_LOG_DIR')
if espa_log_dir and not os.getenv('ESPA_LOG_STDOUT'):
    ih = FileHandler(os.path.join(espa_log_dir, 'espa-api-info.log'))
else:
    ih = StreamHandler(stream=sys.stdout)

# CRITICAL failures are additionally emailed through the local MTA.
eh = SMTPHandler(mailhost='localhost',
                 fromaddr=config.get('apiemailsender'),
                 toaddrs=config.get('ESPA_API_EMAIL_RECEIVE').split(','),
                 subject='ESPA API ERROR')

# Production gets INFO on the info handler; tst/dev keep DEBUG.
if config.mode not in ('tst', 'dev'):
    ih.setLevel(logging.INFO)
else:
    ih.setLevel(logging.DEBUG)
eh.setLevel(logging.CRITICAL)

for handler in [ih, eh]:
    ilogger.addHandler(handler)
    # FileHandler subclasses StreamHandler, so both the file and stdout
    # variants of `ih` receive LOG_FORMAT; the SMTP handler deliberately
    # keeps the default record formatting.
    if isinstance(handler, logging.StreamHandler):
        handler.setFormatter(Formatter(LOG_FORMAT))