Code Example #1
File: verify.py Project: JustScrapy/blog
def register_event_listener():
    for event_ in sqlalchemy_events:
        # listening for 'transient' needs extra work
        if event_ == 'transient':
            event.listens_for(Base, 'init', propagate=True)(transient_state)
            continue
        event.listens_for(Session, event_)(state_transition(event_))
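The example above registers listeners by calling `event.listens_for(...)` directly on an existing function instead of using it as a decorator. Both forms do the same thing, since `listens_for` only returns a decorator. A minimal sketch of the equivalence (the `on_commit` handlers are illustrative):

from sqlalchemy import event
from sqlalchemy.orm import Session

def on_commit(session):
    print("committed:", session)

# Call style, as in the example above: apply the returned decorator immediately.
event.listens_for(Session, "after_commit")(on_commit)

# Decorator style, registering an equivalent listener.
@event.listens_for(Session, "after_commit")
def on_commit_too(session):
    print("committed:", session)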
Code Example #2
    def __init__(self,
                 instruments,
                 frequency=Frequency.MINUTE,
                 table_name='bins',
                 file_name='sqlite'):
        self.__engine = create_engine(f'sqlite:///{file_name}')
        self.__DBSession = sessionmaker(bind=self.__engine)
        self.__logger = get_logger(SQLiteFeed.LOGGER_NAME)
        if table_name in table_names.keys():
            self.__bar_model = table_names[table_name]
        else:
            self.__bar_model = make_bar_model(table_name)
            table_names[table_name] = self.__bar_model

        def before_cursor_execute(conn, cursor, statement, parameters, context,
                                  executemany):
            conn.info.setdefault('query_start_time', []).append(time.time())

            self.__logger.debug(
                f'Start Query: {statement} with params: {parameters}')

        def after_cursor_execute(conn, cursor, statement, parameters, context,
                                 executemany):
            total = time.time() - conn.info['query_start_time'].pop(-1)
            self.__logger.debug("Query Complete!")
            self.__logger.debug("Total Time: %f", total)

        event.listens_for(self.__engine,
                          "before_cursor_execute")(before_cursor_execute)
        event.listens_for(self.__engine,
                          "after_cursor_execute")(after_cursor_execute)

        super(SQLiteFeed, self).__init__(frequency, instruments, None, None)

        self.bars = []
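The two cursor hooks above implement the standard query-timing recipe: push a timestamp in `before_cursor_execute` and pop it in `after_cursor_execute`. A self-contained sketch of the same technique against an in-memory SQLite engine (engine URL and print output are illustrative):

import time

from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")

@event.listens_for(engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    # Stack start times on conn.info so nested executions pair up correctly.
    conn.info.setdefault("query_start_time", []).append(time.time())

@event.listens_for(engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    total = time.time() - conn.info["query_start_time"].pop(-1)
    print(f"{total:.6f}s  {statement}")

with engine.connect() as conn:
    conn.execute(text("SELECT 1"))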
Code Example #3
File: langstrings.py Project: driver4567/assembl
    def setup_ownership_load_event(cls, owner_class, relns):
        def load_owner_object(target, context, attrs=None):
            # `attrs` is only supplied by the "refresh" event; "load" omits it.
            for reln in relns:
                ls = getattr(target, reln, None)
                if ls is not None:
                    ls.owner_object = target

        event.listen(owner_class, "load", load_owner_object, propagate=True)
        # listens_for() only returns a decorator, so passing the listener as a
        # positional argument registered nothing; use listen() directly.
        event.listen(owner_class, "refresh", load_owner_object, propagate=True)

        def set_owner_object(target, value, old_value, initiator):
            if old_value is not None:
                old_value.owner_object = None
            if value is not None:
                value.owner_object = target

        for reln in relns:
            cls._owning_relns.append((owner_class, reln))
            event.listen(getattr(owner_class, reln),
                         "set",
                         set_owner_object,
                         propagate=True)
Code Example #4
    def __init__(self, cls, event_name):
        super().__init__()
        self.cls = cls
        self.event_name = event_name
        self.callbacks = defaultdict(list)

        def listener(mapper, connection, target):
            del mapper
            del connection

            state = inspect(target)
            for attr, callbacks in self.callbacks.items():
                history = state.get_history(attr, True)
                if not history.has_changes():
                    continue

                for callback in callbacks:
                    old = None
                    if history.deleted and history.deleted[0]:
                        old = history.deleted[0]  # not too clear on why this is a list

                    new = None
                    if history.added and history.added[0]:
                        new = history.added[0]  # not too clear on why this is a list

                    callback(target=target, new=new, old=old)
        event.listens_for(self.cls, self.event_name)(listener)
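The listener above reads attribute history from `inspect(target)`: `history.deleted` holds the prior value(s) and `history.added` the new one(s). Both are sequences, which answers the inline comments: collection-valued attributes can gain or lose several elements at once, so scalar changes simply arrive as one-element sequences. A small sketch of the same inspection on a pending object (the `User` model is illustrative):

from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

user = User(name="alice")
history = inspect(user).attrs.name.history
# For this pending object the new value appears in .added.
print(history.has_changes(), history.added, history.deleted)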
Code Example #5
def initialize_logging(settings):
    # UGLY: I don't know how to count CPU time accurately in a multithreaded environment, and I'm too
    #       lazy to implement proper query counting for one either. Since I'm not sure how to test this
    #       reliably, I resort to checking whether we run under the Werkzeug reloader as a means to
    #       decide whether or not to count CPU time and database queries when logging. Rough, but effective.
    count_cpu_usage_and_db_queries = 'WERKZEUG_RUN_MAIN' in environ
    logging_exemptions = () if 'JJ_DEBUG_ASSETS' in environ else ('/static/', '/system/admin/static/', '/system/rq/')
    query_count_increment = install_request_logger(app, count_cpu_usage_and_db_queries, getLogger('jj.request'),
                                                   logging_exemptions)
    if count_cpu_usage_and_db_queries:
        event.listens_for(Engine, "after_cursor_execute")(query_count_increment)
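Note that the listener is attached to the `Engine` class itself rather than to a particular engine; SQLAlchemy accepts class-level event targets, so `query_count_increment` fires for statements executed on every engine in the process.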
Code Example #6
def enable_time_logging(query_time_threshold):
    def before_cursor_execute(conn, cursor, statement, params, context,
                              execmany):  # pylint: disable=W0613,R0913
        conn.info.setdefault('query_start_time', []).append(time.time())

    def after_cursor_execute(conn, cursor, statement, params, context,
                             execmany):  # pylint: disable=W0613,R0913
        total = time.time() - conn.info['query_start_time'].pop(-1)
        if total > query_time_threshold:
            RUNTIME_LOGGER.debug(f' SLOW QUERY: {total:.3f} '.center(80, '-'))
            RUNTIME_LOGGER.debug(f'Params: {params}')  # pylint: disable=W1202
            RUNTIME_LOGGER.debug(statement)

    RUNTIME_LOGGER.setLevel(logging.DEBUG)
    event.listens_for(Engine, 'before_cursor_execute')(before_cursor_execute)
    event.listens_for(Engine, 'after_cursor_execute')(after_cursor_execute)
Code Example #7
        def wrapper(model):
            assert hasattr(model, field)

            # Discover the hooks defined by the @before and @after decorators
            hooks = {"before": [], "after": [], "after_save": []}
            for attr in dir(model):
                if not attr.startswith("__"):
                    hook = getattr(model, attr)
                    if isinstance(hook, Hook):
                        hooks[hook.hook_type].append(hook)

            # Re-order the hooks by order of definition using their order attribute
            for key in hooks:
                hooks[key] = sorted(hooks[key])

            def listener(mapper, connection, target):
                while target._state_machine_save_queue:
                    target._state_machine_save_queue.pop(0)()

            event.listens_for(model, "after_insert")(listener)
            event.listens_for(model, "after_update")(listener)

            # Save the name of the field, the enum base class and the hook list inside the class
            setattr(model, "_state_machine_field", field)
            setattr(model, "_state_machine_hooks", hooks)
            setattr(model, "_state_machine_save_queue", [])

            # Generate the `obj.{transition}()` methods
            for transition in cls.__transitions__:
                transition = BoundTransition(transition, cls)
                setattr(model, transition.name, cls._gen_apply(transition))
                setattr(
                    model,
                    "can_{name}".format(name=transition.name),
                    cls._gen_can_apply(transition),
                )

            # Generate the `is_{state}` properties
            for elem in cls:
                setattr(
                    model,
                    "is_{name}".format(name=str(elem)),
                    cls._gen_is_state(elem, field),
                )

            return model
Code Example #8
def database(_database, request):
    config, engine = _database
    connection = engine.connect()
    trans = connection.begin()
    configure_session(config, bind=connection)

    if request.node.get_closest_marker("allow_db_rollback"):
        session.begin_nested()
        event.listens_for(session, "after_transaction_end")(_restart_savepoint)
    else:
        session().rollback = _error_out

    yield

    session.remove()
    trans.rollback()
    connection.close()
Code Example #9
    def _configure_creation(self, connection):
        def do_connect(dbapi_connection, connection_record):
            # disable pysqlite's emitting of the BEGIN statement entirely.
            # also stops it from emitting COMMIT before any DDL.
            iso_level = dbapi_connection.isolation_level
            dbapi_connection.isolation_level = None
            try:
                dbapi_connection.execute("PRAGMA page_size = 5120;")
                dbapi_connection.execute("PRAGMA cache_size = 12000;")
                dbapi_connection.execute("PRAGMA foreign_keys = ON;")
                dbapi_connection.execute("PRAGMA journal_mode = WAL;")

            except Exception:
                # Ignore PRAGMA failures; the connection is still usable.
                pass
            dbapi_connection.isolation_level = iso_level

        event.listens_for(connection, "connect")(do_connect)
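Issuing PRAGMAs from a `connect` listener is the standard way to configure each new SQLite connection as the pool creates it. A minimal sketch of the same idea using an explicit cursor, as the SQLAlchemy documentation does (the engine URL is illustrative):

from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")

@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys = ON")
    cursor.close()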
Code Example #10
File: langstrings.py Project: assembl/assembl
    def setup_ownership_load_event(cls, owner_class, relns):
        def load_owner_object(target, context, attrs=None):
            # `attrs` is only supplied by the "refresh" event; "load" omits it.
            for reln in relns:
                ls = getattr(target, reln, None)
                if ls is not None:
                    ls.owner_object = target
        event.listen(owner_class, "load", load_owner_object, propagate=True)
        # As above, listens_for() called this way registers nothing; use listen().
        event.listen(owner_class, "refresh", load_owner_object, propagate=True)

        def set_owner_object(target, value, old_value, initiator):
            if old_value is not None:
                old_value.owner_object = None
            if value is not None:
                value.owner_object = target
        for reln in relns:
            cls._owning_relns.append((owner_class, reln))
            event.listen(getattr(owner_class, reln), "set", set_owner_object, propagate=True)
Code Example #11
File: queue.py Project: faith0811/makiki
def register_to_celery(celery_broker, celery_config, async_task, max_retries=12, DBSession=None):
    def send_after_commit_tasks(session):
        if not hasattr(async_ctx, 'reged_tasks'):
            return
        for task in async_ctx.reged_tasks:
            task.send(async_api)
        delattr(async_ctx, 'reged_tasks')

    broker = 'amqp://{user}:{password}@{host}:{port}/{vhost}'.\
        format(**celery_broker)

    app = Celery(broker=broker)
    app.conf.update(**celery_config)

    async_api = app.task(max_retries=max_retries, bind=True)(async_task)
    if DBSession:
        if event:
            event.listens_for(DBSession, 'after_commit')(send_after_commit_tasks)
        else:
            raise ImportError('You must install sqlalchemy first.')

    return app, async_api
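Registering `send_after_commit_tasks` on the session's `after_commit` event is what defers task publishing until the transaction has actually committed, so no task is ever sent for work that ends up rolled back.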
Code Example #12
    def test_session_events(self):
        app = flask.Flask(__name__)
        app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
        app.config['TESTING'] = True
        db = sqlalchemy.SQLAlchemy(app)

        from sqlalchemy.event import listens_for

        seen = []
        register = listens_for(db.session, 'after_commit')
        register(seen.append)

        db.session.commit()
        self.assertEqual(seen, [db.session()])
Code Example #13
File: mysqlfeed.py Project: sjyMystery/myAlgo
    def __init__(self,
                 table_name='bars',
                 db_name='database',
                 db_username='******',
                 db_password='******',
                 connector='mysqldb',
                 db_host='127.0.0.1',
                 db_port=3306):
        self.__engine = create_engine(
            f'mysql+{connector}://{db_username}:{db_password}@{db_host}:{db_port}/{db_name}'
        )
        self.__DBSession = sessionmaker(bind=self.__engine)
        self.__logger = get_logger(MySQLFeed.LOGGER_NAME)
        self.__bar_model = make_bar_model(table_name)

        def before_cursor_execute(conn, cursor, statement, parameters, context,
                                  executemany):
            conn.info.setdefault('query_start_time', []).append(time.time())

            self.__logger.debug(
                f'Start Query: {statement} with params: {parameters}')

        def after_cursor_execute(conn, cursor, statement, parameters, context,
                                 executemany):
            total = time.time() - conn.info['query_start_time'].pop(-1)
            self.__logger.debug("Query Complete!")
            self.__logger.debug("Total Time: %f", total)

        event.listens_for(self.__engine,
                          "before_cursor_execute")(before_cursor_execute)
        event.listens_for(self.__engine,
                          "after_cursor_execute")(after_cursor_execute)

        super(MySQLFeed, self).__init__()

        self.bars = []
Code Example #14
File: __init__.py Project: waynesun09/metadash
    def wa(f):
        if event_name in SQLALCHEMY_MAPPER_EVENTS:
            event.listens_for(entity, event_name)(f)
        elif event_name in METADASH_TO_SQLALCHEMY_EVNET_MAP:
            events = METADASH_TO_SQLALCHEMY_EVNET_MAP[event_name]
            if isinstance(events, list):
                for ev in events:
                    event.listens_for(entity, ev)(f)
            elif isinstance(events, str):
                event.listens_for(entity, events)(f)
        return f
Code Example #15
File: sqlalchemy.py Project: kuc2477/news
def create_schedule(abc_schedule, base, mixins=None, persister=None):
    """Concrete schedule model factory.

    :param abc_schedule: Abstract base schedule to use as base.
    :type abc_schedule: Any ABC schedule from :func:`~create_abc_schedule`
        factory function.
    :param base: SQLAlchemy model base to use.
    :type base: Any SQLAlchemy model base from
        :func:`sqlalchemy.ext.declarative.declarative_base` factory function
    :param mixins: Mixins to be mixed into concrete schedule model.
    :type mixins: Iterable mixin classes.
    :param persister: Persister to use for the schedule persistence.
    :type persister: :class:`~news.persistence.ScheduleNotifier`
    :returns: Concrete schedule model based on given abc schedule.
    :rtype: :class:`~news.models.AbstractSchedule` SQLAlchemy
        implementation based on given abc schedule, model base and mixins.

    """
    mixins = mixins or tuple()
    Schedule = type('Schedule', mixins + (abc_schedule, base), {})

    # connect persister if given
    if persister:
        event.listens_for(Schedule, 'after_insert')(
            lambda mapper, connection, target:
            persister.notify_saved(target, created=True)
        )
        event.listens_for(Schedule, 'after_update')(
            lambda mapper, connection, target:
            persister.notify_saved(target, created=False)
        )
        event.listens_for(Schedule, 'after_delete')(
            lambda mapper, connection, target:
            persister.notify_deleted(target)
        )

    return Schedule
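A note on the lambdas: mapper-level events such as `after_insert`, `after_update`, and `after_delete` invoke their listeners as `listener(mapper, connection, target)`, so each lambda simply adapts that fixed signature to the persister's `notify_saved`/`notify_deleted` API.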
Code Example #16
    def enqueue_after_commit(handler_cls, *args, **kwargs):  # type: ignore
        sqlalchemy_event.listens_for(
            connection, "commit")(lambda _conn: queue.enqueue(
                async_handler_generic_task, handler_cls, *args, **kwargs))
Code Example #17
    def setup(self):
        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components

        localcache_cls = (SelfEmptyingCache
                          if self.running_as_script else LocalCache)
        num_mc_clients = self.num_mc_clients

        self.cache_chains = {}

        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper, LiveConfig,
                                          LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts,
                                                  (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(self.zookeeper,
                                      "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real

        self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)

        self.stats = Stats(self.config.get('statsd_addr'),
                           self.config.get('statsd_sample_rate'))

        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
            StatsCollectingConnectionPool(keyspace,
                                          stats=self.stats,
                                          logging_name="main",
                                          server_list=self.cassandra_seeds,
                                          pool_size=self.cassandra_pool_size,
                                          timeout=2,
                                          max_retries=3,
                                          prefill=False),
        }

        perma_memcache = (CMemcache(self.permacache_memcaches,
                                    num_clients=num_mc_clients)
                          if self.permacache_memcaches else None)
        self.permacache = CassandraCacheChain(
            localcache_cls(),
            CassandraCache('permacache',
                           self.cassandra_pools[self.cassandra_default_pool],
                           read_consistency_level=self.cassandra_rcl,
                           write_consistency_level=self.cassandra_wcl),
            memcache=perma_memcache,
            lock_factory=self.make_lock)

        self.cache_chains.update(permacache=self.permacache)

        # hardcache is done after the db info is loaded, and then the
        # chains are reset to use the appropriate initial entries

        if self.stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                CMemcache(self.stalecaches, num_clients=num_mc_clients),
                self.memcache)
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain(
            (localcache_cls(),
             CMemcache(self.rendercaches,
                       noreply=True,
                       no_block=True,
                       num_clients=num_mc_clients)))
        self.cache_chains.update(rendercache=self.rendercache)

        self.thing_cache = CacheChain((localcache_cls(), ))
        self.cache_chains.update(thing_cache=self.thing_cache)

        #load the database info
        self.dbm = self.load_db_params()

        # can't do this until load_db_params() has been called
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True)
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()

        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        # set the modwindow
        self.MODWINDOW = timedelta(self.MODWINDOW)

        self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])

        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, 'oauth_domain', None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        #setup the logger
        self.log = logging.getLogger('reddit')
        self.log.addHandler(logging.StreamHandler())
        if self.debug:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)

        # set log level for pycountry which is chatty
        logging.getLogger('pycountry.db').setLevel(logging.CRITICAL)

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print("Warning: g.media_domain == g.domain. " +
                  "This may give untrusted content access to user cookies")

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        #if we're going to use the query_queue, we need amqp
        if self.write_query_queue and not self.amqp_host:
            raise Exception("amqp_host must be defined to use the query queue")

        # This requirement doesn't *have* to be a requirement, but there are
        # bugs at the moment that will pop up if you violate it
        if self.write_query_queue and not self.use_query_cache:
            raise Exception("write_query_queue requires use_query_cache")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        if self.log_start:
            self.log.error("reddit app %s:%s started %s at %s" %
                           (self.reddit_host, self.reddit_pid,
                            self.short_version, datetime.now()))
Code Example #18
File: app_globals.py Project: DamonAnderson/reddit
    def setup(self, global_conf):
        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components

        localcache_cls = (SelfEmptyingCache if self.running_as_script
                          else LocalCache)
        num_mc_clients = self.num_mc_clients

        self.cache_chains = {}

        self.memcache = CMemcache(self.memcaches, num_clients = num_mc_clients)
        self.make_lock = make_lock_factory(self.memcache)

        self.stats = Stats(global_conf.get('statsd_addr'),
                           global_conf.get('statsd_sample_rate'))

        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")


        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="main",
                    server_list=self.cassandra_seeds,
                    pool_size=self.cassandra_pool_size,
                    timeout=2,
                    max_retries=3,
                    prefill=False
                ),
            "noretries":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="noretries",
                    server_list=self.cassandra_seeds,
                    pool_size=len(self.cassandra_seeds),
                    timeout=2,
                    max_retries=0,
                    prefill=False
                ),
        }

        perma_memcache = (CMemcache(self.permacache_memcaches, num_clients = num_mc_clients)
                          if self.permacache_memcaches
                          else None)
        self.permacache = CassandraCacheChain(localcache_cls(),
                                              CassandraCache('permacache',
                                                             self.cassandra_pools[self.cassandra_default_pool],
                                                             read_consistency_level = self.cassandra_rcl,
                                                             write_consistency_level = self.cassandra_wcl),
                                              memcache = perma_memcache,
                                              lock_factory = self.make_lock)

        self.cache_chains.update(permacache=self.permacache)

        # hardcache is done after the db info is loaded, and then the
        # chains are reset to use the appropriate initial entries

        if self.stalecaches:
            self.cache = StaleCacheChain(localcache_cls(),
                                         CMemcache(self.stalecaches, num_clients=num_mc_clients),
                                         self.memcache)
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain((localcache_cls(),
                                          CMemcache(self.rendercaches,
                                                    noreply=True, no_block=True,
                                                    num_clients = num_mc_clients)))
        self.cache_chains.update(rendercache=self.rendercache)

        self.thing_cache = CacheChain((localcache_cls(),))
        self.cache_chains.update(thing_cache=self.thing_cache)

        #load the database info
        self.dbm = self.load_db_params(global_conf)

        # can't do this until load_db_params() has been called
        self.hardcache = HardcacheChain((localcache_cls(),
                                         self.memcache,
                                         HardCache(self)),
                                        cache_negative_results = True)
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        #make a query cache
        self.stats_collector = QueryStats()

        # set the modwindow
        self.MODWINDOW = timedelta(self.MODWINDOW)

        self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])
        
        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)


        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        #setup the logger
        self.log = logging.getLogger('reddit')
        self.log.addHandler(logging.StreamHandler())
        if self.debug:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)

        # set log level for pycountry which is chatty
        logging.getLogger('pycountry.db').setLevel(logging.CRITICAL)

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print ("Warning: g.media_domain == g.domain. " +
                   "This may give untrusted content access to user cookies")

        self.reddit_host = socket.gethostname()
        self.reddit_pid  = os.getpid()

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        #if we're going to use the query_queue, we need amqp
        if self.write_query_queue and not self.amqp_host:
            raise Exception("amqp_host must be defined to use the query queue")

        # This requirement doesn't *have* to be a requirement, but there are
        # bugs at the moment that will pop up if you violate it
        if self.write_query_queue and not self.use_query_cache:
            raise Exception("write_query_queue requires use_query_cache")

        # try to set the source control revision number
        try:
            self.version = subprocess.check_output(["git", "rev-parse", "HEAD"])
        except subprocess.CalledProcessError, e:
            self.log.info("Couldn't read source revision (%r)" % e)
            self.version = self.short_version = '(unknown)'
Code Example #19
File: __init__.py Project: wujm2007/medscrawler
    def after_commit(self, func):
        event.listens_for(self, 'after_commit', once=True)(adapt(func))
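`once=True` deregisters the listener after its first invocation, making this a one-shot commit hook. A sketch of the behavior (engine URL and message are illustrative):

from sqlalchemy import create_engine, event
from sqlalchemy.orm import Session

engine = create_engine("sqlite://")
session = Session(engine)

def announce(session):
    print("fired exactly once")

event.listens_for(session, "after_commit", once=True)(announce)

session.commit()  # invokes announce, then the listener is removed
session.commit()  # prints nothing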
Code Example #20
from sqlalchemy import event

from API import *

from flask_sqlalchemy import SQLAlchemy

project_path = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_path, "YCM.db"))
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
db = SQLAlchemy(app)

# listens_for() returns a decorator; it must be applied to the function,
# otherwise nothing gets registered.
@event.listens_for(db.get_engine(), 'connect')
def create_math_functions_on_connect(dbapi_connection, connection_record):
    dbapi_connection.create_function('sin', 1, math.sin)
    dbapi_connection.create_function('cos', 1, math.cos)
    dbapi_connection.create_function('acos', 1, math.acos)
    dbapi_connection.create_function('radians', 1, math.radians)
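With the decorator applied, every new DBAPI connection gains SQL-level `sin`, `cos`, `acos`, and `radians` functions, which SQLite does not provide natively; `create_function(name, num_params, func)` is the standard `sqlite3` API for registering them.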
Code Example #21
File: file.py Project: girder/dkc-experiment
        self.status = FileStatus.READY

    def open(self, *args, **kwargs):
        return self.filesystem.fs.open(self.path, *args, **kwargs)

    def delete_blob(self):
        self.status = FileStatus.DELETING
        self.filesystem.fs.remove(self.path)
        self.status = FileStatus.DELETED

    @classmethod
    def _delete_file_blob(cls, mapper, connection, target):
        try:
            target.delete_blob()
        except Exception as e:
            current_app.logger.exception(e)


event.listens_for(File, 'after_delete', File._delete_file_blob)


class FileSchema(BaseSchema):
    __model__ = File

    path = fields.Str(required=True, validate=validate.Regexp(_path_regexp))
    size = fields.Int(missing=0, validate=validate.Range(min=0))
    status = fields.Int(missing=FileStatus.CREATED,
                        validate=validate.OneOf(FileStatus))
    filesystem_id = fields.UUID(required=True, load_only=True)
    filesystem = fields.Nested('FilesystemSchema', dump_only=True)
Code Example #22
File: __init__.py Project: renalreg/radar
    def make_connector(self, app=None, bind=None):
        connector = super(SQLAlchemy, self).make_connector(app, bind)
        engine = connector.get_engine()
        event.listens_for(engine, 'engine_connect')(ping_connection)
        return connector
Code Example #23
File: app_globals.py Project: debanshuk/reddit
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        # This requirement doesn't *have* to be a requirement, but there are
        # bugs at the moment that will pop up if you violate it
        # XXX: get rid of these options. new query cache is always on.
        if self.write_query_queue and not self.use_query_cache:
            raise Exception("write_query_queue requires use_query_cache")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])

        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, 'oauth_domain', None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        #setup the logger
        self.log = logging.getLogger('reddit')
        self.log.addHandler(logging.StreamHandler())
        if self.debug:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)

        # set log level for pycountry which is chatty
        logging.getLogger('pycountry.db').setLevel(logging.CRITICAL)

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print ("Warning: g.media_domain == g.domain. " +
                   "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid  = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper,
                                          LiveConfig, LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                             zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(self.zookeeper, "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real
        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
        )

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(
            self.pagecaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="main",
                    server_list=self.cassandra_seeds,
                    pool_size=self.cassandra_pool_size,
                    timeout=4,
                    max_retries=3,
                    prefill=False
                ),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl
        )

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        self.cache_chains = {}
        localcache_cls = (SelfEmptyingCache if self.running_as_script
                          else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        self.cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((
            localcache_cls(),
            pagecaches,
        ))
        self.cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(),))
        self.cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        self.cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
Code Example #24
File: routes.py Project: RuslanGlaznyov/webstore
        form = request.form
        id = int(form.get('good_id'))
        session['cart'].append(id)
        flash('added to cart')
        return redirect(url_for('cart'))
    else:
        return redirect(url_for('index'))


@app.route('/delete_from_cart', methods=['POST', 'GET'])
def delete_from_cart():
    if request.method == 'POST':
        if 'cart' not in session:
            session['cart'] = []

        form = request.form
        id = int(form.get('good_id'))
        session['cart'].remove(id)
        flash('')
        return redirect(url_for('cart'))


# admin decorator
listens_for(Good, 'after_delete')(del_image)

admin.add_view(GoodView(Good, db.session))
admin.add_view(CategoryView(Category, db.session))
admin.add_view(OrderView(Order, db.session))
admin.add_view(UserView(User, db.session))
Code Example #25
File: environment.py Project: purusharths/fts-rest
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config``
    object
    """
    # Pylons paths
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, 'controllers'),
                 static_files=os.path.join(root, 'public'),
                 templates=[os.path.join(root, 'templates')])

    # Initialize config with the basic options
    if is_pylons_0:
        config = pylons_config
    else:
        config = PylonsConfig()
    config.init_app(global_conf, app_conf, package='fts3rest', paths=paths)

    config['routes.map'] = make_map(config)
    config['pylons.app_globals'] = app_globals.Globals(config)
    config['pylons.h'] = fts3rest.lib.helpers

    # Setup cache object as early as possible
    import pylons
    pylons.cache._push_object(config['pylons.app_globals'].cache)

    # If fts3.config is set, load configuration from there
    fts3_config_file = config.get('fts3.config')
    if fts3_config_file:
        fts3cfg = fts3_config_load(fts3_config_file)
        # Let the database be overriden by fts3rest.ini
        if 'sqlalchemy.url' in config and 'sqlalchemy.url' in fts3cfg:
            del fts3cfg['sqlalchemy.url']
        config.update(fts3cfg)

    # Setup the SQLAlchemy database engine
    kwargs = dict()
    if config['sqlalchemy.url'].startswith('mysql://'):
        import MySQLdb.cursors
        kwargs['connect_args'] = {'cursorclass': MySQLdb.cursors.SSCursor}
    engine = engine_from_config(config,
                                'sqlalchemy.',
                                pool_recycle=7200,
                                **kwargs)
    init_model(engine)

    # Disable for sqlite the isolation level to work around issues with savepoints
    if config['sqlalchemy.url'].startswith('sqlite'):

        @event.listens_for(engine, "connect")
        def do_connect(dbapi_connection, connection_record):
            dbapi_connection.isolation_level = None

    # Catch dead connections
    event.listens_for(engine, 'checkout')(connection_validator)
    event.listens_for(engine, 'connect')(connection_set_sqlmode)

    # Mako templating
    config['pylons.app_globals'].mako_lookup = TemplateLookup(
        directories=paths['templates'], )

    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    return config
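The `connect` listener that clears `isolation_level` is half of the documented pysqlite transaction workaround; the documentation pairs it with a `begin` listener that emits `BEGIN` itself. A sketch of that companion hook, to sit alongside the sqlite `connect` listener above (recent SQLAlchemy; older versions used `conn.execute("BEGIN")`):

@event.listens_for(engine, "begin")
def do_begin(conn):
    # pysqlite's implicit BEGIN was disabled above, so emit our own.
    conn.exec_driver_sql("BEGIN")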
Code Example #26
File: app_globals.py Project: 99plus2/reddit
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])

        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, "oauth_domain", None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get("static_files"), "static")
        names_file_path = os.path.join(static_files, "names.json")
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # if we're a web app running on old uwsgi, force load the logging
        # config from the file since uwsgi didn't do it for us
        if not self.running_as_script and self.old_uwsgi_load_logging_config:
            logging.config.fileConfig(self.config["__file__"])

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger("reddit")

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # make cssutils use the real logging system
        csslog = logging.getLogger("cssutils")
        cssutils.log.setLog(csslog)

        # load the country list
        countries_file_path = os.path.join(static_files, "countries.json")
        try:
            with open(countries_file_path) as handle:
                self.countries = json.load(handle)
            self.log.debug("Using countries.json.")
        except IOError:
            self.log.warning("Couldn't find countries.json. Using pycountry.")
            self.countries = get_countries_and_codes()

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print("Warning: g.media_domain == g.domain. " + "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        if hasattr(signal, "SIGUSR1"):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import connect_to_zookeeper, LiveConfig, LiveDict, LiveList

            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(
                self.zookeeper, "/throttles", map_fn=ipaddress.ip_network, reduce_fn=ipaddress.collapse_addresses
            )
            self.banned_domains = LiveDict(self.zookeeper, "/banned-domains", watch=True)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real
            self.banned_domains = dict()
        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches, num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches, num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches, num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(self.rendercaches, noreply=True, no_block=True, num_clients=num_mc_clients)

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(self.pagecaches, noreply=True, no_block=True, num_clients=num_mc_clients)

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main": StatsCollectingConnectionPool(
                keyspace,
                stats=self.stats,
                logging_name="main",
                server_list=self.cassandra_seeds,
                pool_size=self.cassandra_pool_size,
                timeout=4,
                max_retries=3,
                prefill=False,
            )
        }

        permacache_cf = CassandraCache(
            "permacache",
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl,
        )

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        event.listens_for(engine.Engine, "before_cursor_execute")(self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, "after_cursor_execute")(self.stats.pg_after_cursor_execute)

        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        self.cache_chains = {}
        localcache_cls = SelfEmptyingCache if self.running_as_script else LocalCache

        if stalecaches:
            self.cache = StaleCacheChain(localcache_cls(), stalecaches, self.memcache)
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain((localcache_cls(), rendercaches))
        self.cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((localcache_cls(), pagecaches))
        self.cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(),))
        self.cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(), permacache_cf, memcache=permacache_memcaches, lock_factory=self.make_lock
        )
        self.cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain((localcache_cls(), self.memcache, HardCache(self)), cache_negative_results=True)
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them to be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()

        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
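
Note: the POSTGRES block above registers the listeners on the engine.Engine class itself, so they apply to every engine created afterwards. The stats handlers are not part of the snippet; a minimal sketch of what such a timing pair might look like (the free-function form, the use of conn.info, and the print call are assumptions, not reddit's actual implementation):

import time

def pg_before_cursor_execute(conn, cursor, statement, parameters,
                             context, executemany):
    # stash the query start time on the connection's info dict
    conn.info.setdefault('query_start_time', []).append(time.time())

def pg_after_cursor_execute(conn, cursor, statement, parameters,
                            context, executemany):
    # pop the matching start time and report the elapsed wall-clock time
    elapsed = time.time() - conn.info['query_start_time'].pop(-1)
    print("query took %.6fs: %s" % (elapsed, statement))
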
Code example #28
File: hook.py Project: tryer3000/flask-app
def before_first_req():
    event.listens_for(db.engine,
                      'before_cursor_execute')(receive_before_cursor_execute)
    event.listens_for(db.engine,
                      'after_cursor_execute')(receive_after_cursor_execute)
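
The receive_* callbacks themselves are not shown. A hedged wiring sketch, assuming a Flask app object and flask_sqlalchemy's db (db.engine needs an application context, which is why the registration is deferred to this hook):

# hypothetical wiring; Flask invokes this once, before the first request
app.before_first_request(before_first_req)
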
Code example #29
File: __init__.py Project: wujm2007/medscrawler
    def after_rollback(self, func):
        event.listens_for(self, 'after_soft_rollback',
                          once=True)(adapt(func))
Code example #30
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        # This requirement doesn't *have* to be a requirement, but there are
        # bugs at the moment that will pop up if you violate it
        # XXX: get rid of these options. new query cache is always on.
        if self.write_query_queue and not self.use_query_cache:
            raise Exception("write_query_queue requires use_query_cache")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])

        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, 'oauth_domain', None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        #setup the logger
        self.log = logging.getLogger('reddit')
        self.log.addHandler(logging.StreamHandler())
        if self.debug:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)

        # set log level for pycountry which is chatty
        logging.getLogger('pycountry.db').setLevel(logging.CRITICAL)

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print ("Warning: g.media_domain == g.domain. " +
                   "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid  = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper,
                                          LiveConfig, LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                             zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(self.zookeeper, "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real
        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates as well as fully
        # cached pages.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="main",
                    server_list=self.cassandra_seeds,
                    pool_size=self.cassandra_pool_size,
                    timeout=2,
                    max_retries=3,
                    prefill=False
                ),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl
        )

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        self.cache_chains = {}
        localcache_cls = (SelfEmptyingCache if self.running_as_script
                          else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        self.cache_chains.update(rendercache=self.rendercache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(),))
        self.cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        self.cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them to be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
Code example #31
    def __init__(self, engine=None, **kvargs):
        # setup regex support for sqlalchemy
        # this hooks up the callback in sqlite
        listens_for(engine, "begin")(self.hookup_regex)
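
hookup_regex is not included in the snippet. One plausible implementation registers a REGEXP function on the raw sqlite3 connection; a sketch under that assumption, written as free functions (listening on "connect" instead of "begin" would also work and avoids re-registering on every transaction):

import re

def regexp(pattern, value):
    # two-argument implementation backing SQLite's REGEXP operator
    return value is not None and re.search(pattern, value) is not None

def hookup_regex(conn):
    # "begin" hands the listener a SQLAlchemy Connection; reach through
    # to the underlying sqlite3 connection to register the function
    conn.connection.create_function("regexp", 2, regexp)
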
Code example #32
File: db.py Project: cpanse/ms_deisotope
    def __call__(self):
        connection = create_engine(
            self.connection_url, connect_args=self.connect_args,
            **self.engine_args)
        event.listens_for(connection, 'connect')(self.on_connect)
        return connection
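
The 'connect' event fires once for each new DBAPI connection, which makes on_connect a natural place for connection-local settings. A sketch of a plausible handler, written here as a free function (the body is an assumption; compare the PRAGMA handler in code example #35):

def on_connect(dbapi_connection, connection_record):
    # hypothetical: apply connection-local settings as soon as the pool
    # creates the raw DBAPI connection
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()
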
Code example #33
File: core.py Project: kriskavalieri/postschema
def create_model(schema_cls, info_logger):  # noqa
    ALLOWED_HOOKS = {'before_create', 'after_create'}
    name = schema_cls.__name__
    methods = dict(schema_cls.__dict__)
    try:
        tablename = methods.get('__tablename__',
                                getattr(schema_cls, '__tablename__'))
        model_methods = {'__tablename__': tablename}
    except AttributeError:
        # getattr raises AttributeError (not KeyError) when the attribute
        # is missing, so catch that to surface the clearer message below
        raise AttributeError(f'{name} needs to define `__tablename__`')

    meta = methods.get('Meta')
    hooks = methods.get('Hooks')
    declared_fields = methods['_declared_fields']

    if hasattr(meta, '__table_args__'):
        model_methods['__table_args__'] = meta.__table_args__

    if hooks:
        hook_methods = {
            attr
            for attr in dir(hooks)
            if not attr.startswith('__') and callable(getattr(hooks, attr))
        }
        hooks = {
            attr: getattr(hooks, attr)
            for attr in hook_methods & ALLOWED_HOOKS
        }

    id_constraints = []
    indexes = {}

    for fieldname, field_attrs in declared_fields.items():
        if isinstance(field_attrs, fields.Field):
            if isinstance(field_attrs, postschema_fields.AutoSessionField):
                perms = getattr(methods.get('Public'), 'permissions', None)
                if perms and hasattr(
                        perms,
                        'post') and 'primary_key' in field_attrs.metadata:
                    # auto-injected primary key is based on the session context,
                    # so we can't allow public posts.
                    raise AttributeError(
                        f"{name} can't include 'post' as a public permission attribute"
                    )

            metadata = field_attrs.metadata
            try:
                field_instance = metadata.pop('sqlfield',
                                              None) or metadata['fk']
                if not field_instance:
                    continue
            except KeyError:
                # skip fields with no sql bindings
                continue
            except AttributeError:
                raise AttributeError(
                    f'Schema field `{fieldname}` needs to define a SQLAlchemy field instance'
                )

            translated = {}
            default_value = field_attrs.default
            if default_value != missing:
                translated['server_default'] = default_value() if callable(
                    default_value) else default_value

            args = []
            if 'fk' in metadata:
                args.append(metadata['fk'])
            if 'autoincrement' in metadata:
                args.append(metadata.pop('autoincrement'))
            metadict = metadata.copy()
            metadict.pop('fk', None)
            metadict.pop('read_only', None)
            metadict.pop('is_aware', None)
            if metadict.pop('gist_index', False):
                indexes[f'{fieldname}_gist_idx'] = [
                    tablename, fieldname, 'gist'
                ]
            if metadict.pop('gin_index', False):
                indexes[f'{fieldname}_gin_idx'] = [tablename, fieldname, 'gin']
            identity_constraint = metadict.pop('identity_constraint', {})
            model_methods[fieldname] = sql.Column(field_instance, *args,
                                                  **metadict, **translated)

            # parse identity_constraint
            if identity_constraint:
                identity_constraint['target_table_local_ref'] = fieldname
                identity_constraint['tablename'] = tablename
                id_constraints.append(identity_constraint)

    modelname = name + 'Model'
    new_model = type(modelname, (Base, ), model_methods)
    if hooks:
        for method, method_fn in hooks.items():
            event.listens_for(Base.metadata.tables[tablename],
                              method)(method_fn)

    for index_name, index_items in indexes.items():
        tablename, fieldname, index_type = index_items
        add_index(Base.metadata, index_name, tablename, fieldname, index_type)

    for id_constraint in id_constraints:
        add_identity_triggers(Base.metadata, id_constraint)

    info_logger.debug(f"- created model `{modelname}`")
    return new_model
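
The Hooks branch above maps hook names directly onto SQLAlchemy's table-level DDL events. A sketch of what the registration loop effectively does, for a hypothetical table named 'listing' with an after_create hook (the table name and hook body are assumptions; Base comes from the example's own module):

def after_create(target, connection, **kw):
    # DDL event signature: target is the Table, connection is a live
    # connection usable for follow-up statements
    print("created table %s" % target.name)

event.listens_for(Base.metadata.tables['listing'], 'after_create')(after_create)
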
Code example #34
File: app_globals.py Project: Anenome/reddit
    def setup(self):
        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components

        localcache_cls = (SelfEmptyingCache if self.running_as_script
                          else LocalCache)
        num_mc_clients = self.num_mc_clients

        self.cache_chains = {}

        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper,
                                          LiveConfig, LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                             zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.throttles = LiveList(self.zookeeper, "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.throttles = tuple()  # immutable since it's not real

        self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)
        self.lock_cache = CMemcache(self.lockcaches, num_clients=num_mc_clients)

        self.stats = Stats(self.config.get('statsd_addr'),
                           self.config.get('statsd_sample_rate'))

        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")


        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
                StatsCollectingConnectionPool(
                    keyspace,
                    stats=self.stats,
                    logging_name="main",
                    server_list=self.cassandra_seeds,
                    pool_size=self.cassandra_pool_size,
                    timeout=2,
                    max_retries=3,
                    prefill=False
                ),
        }

        perma_memcache = (CMemcache(self.permacache_memcaches,
                                    num_clients=num_mc_clients)
                          if self.permacache_memcaches
                          else None)
        self.permacache = CassandraCacheChain(localcache_cls(),
                                              CassandraCache('permacache',
                                                             self.cassandra_pools[self.cassandra_default_pool],
                                                             read_consistency_level=self.cassandra_rcl,
                                                             write_consistency_level=self.cassandra_wcl),
                                              memcache=perma_memcache,
                                              lock_factory=self.make_lock)

        self.cache_chains.update(permacache=self.permacache)

        # hardcache is done after the db info is loaded, and then the
        # chains are reset to use the appropriate initial entries

        if self.stalecaches:
            self.cache = StaleCacheChain(localcache_cls(),
                                         CMemcache(self.stalecaches, num_clients=num_mc_clients),
                                         self.memcache)
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        self.cache_chains.update(cache=self.cache)

        self.rendercache = MemcacheChain((localcache_cls(),
                                          CMemcache(self.rendercaches,
                                                    noreply=True, no_block=True,
                                                    num_clients=num_mc_clients)))
        self.cache_chains.update(rendercache=self.rendercache)

        self.thing_cache = CacheChain((localcache_cls(),))
        self.cache_chains.update(thing_cache=self.thing_cache)

        #load the database info
        self.dbm = self.load_db_params()

        # can't do this until load_db_params() has been called
        self.hardcache = HardcacheChain((localcache_cls(),
                                         self.memcache,
                                         HardCache(self)),
                                        cache_negative_results=True)
        self.cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them to be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        cache_chains = self.cache_chains.copy()
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.reset_caches = reset_caches
        self.reset_caches()

        # set the modwindow
        self.MODWINDOW = timedelta(self.MODWINDOW)

        self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain
        self.secure_domains = set([urlparse(self.payment_domain).netloc])
        
        self.trusted_domains = set([self.domain])
        self.trusted_domains.update(self.authorized_cnames)
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.secure_domains.add(https_url.netloc)
            self.trusted_domains.add(https_url.hostname)
        if getattr(self, 'oauth_domain', None):
            self.secure_domains.add(self.oauth_domain)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        #setup the logger
        self.log = logging.getLogger('reddit')
        self.log.addHandler(logging.StreamHandler())
        if self.debug:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)

        # set log level for pycountry which is chatty
        logging.getLogger('pycountry.db').setLevel(logging.CRITICAL)

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print ("Warning: g.media_domain == g.domain. " +
                   "This may give untrusted content access to user cookies")

        self.reddit_host = socket.gethostname()
        self.reddit_pid  = os.getpid()

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        #if we're going to use the query_queue, we need amqp
        if self.write_query_queue and not self.amqp_host:
            raise Exception("amqp_host must be defined to use the query queue")

        # This requirement doesn't *have* to be a requirement, but there are
        # bugs at the moment that will pop up if you violate it
        if self.write_query_queue and not self.use_query_cache:
            raise Exception("write_query_queue requires use_query_cache")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        if self.log_start:
            self.log.error("reddit app %s:%s started %s at %s" %
                           (self.reddit_host, self.reddit_pid,
                            self.short_version, datetime.now()))
Code example #35
File: database.py Project: gamcil/fungphy
DB_PATH = os.getenv("FUNGPHY_DB") or "fungphy.db"
engine = create_engine(f"sqlite:///{DB_PATH}")
session = scoped_session(
    sessionmaker(bind=engine, autocommit=False, autoflush=False))

naming_convention = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}
meta = MetaData(naming_convention=naming_convention)
Base = declarative_base(metadata=meta)
Base.query = session.query_property()


def init_db():
    import fungphy.models
    configure_mappers()
    Base.metadata.create_all(engine)


@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()
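
The PRAGMA has to be issued per connection because SQLite's foreign_keys setting is connection-local rather than database-global. The decorator form above is equivalent to the programmatic registration used elsewhere in these examples:

event.listens_for(Engine, "connect")(set_sqlite_pragma)
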
Code example #36
File: models.py Project: eljog/blog-ms-swdv630
def polymorphic_fallback(mapper_klass):
    event.listens_for(mapper_klass,
                      'mapper_configured')(receive_mapper_configured)
    return mapper_klass
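
receive_mapper_configured is not part of the snippet. A common fallback recipe replaces the mapper's polymorphic_map with a dict that returns the base mapper for unknown discriminator values; a sketch under that assumption:

def receive_mapper_configured(mapper, class_):
    # assumption: rows with an unrecognized polymorphic identity should
    # load as the base class instead of raising
    class FallbackMap(dict):
        def __missing__(self, key):
            return mapper

    fallback = FallbackMap()
    fallback.update(mapper.polymorphic_map)
    mapper.polymorphic_map = fallback
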
Code example #37
    def setup(self):
        self.queues = queues.declare_queues(self)

        ################# PROVIDERS
        self.media_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.media",
            self.media_provider,
        )
        self.startup_timer.intermediate("providers")

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain

        self.trusted_domains = set([self.domain])
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.trusted_domains.add(https_url.hostname)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger('reddit')

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # make cssutils use the real logging system
        csslog = logging.getLogger("cssutils")
        cssutils.log.setLog(csslog)

        # set locations
        self.locations = {}

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print >> sys.stderr, (
                "Warning: g.media_domain == g.domain. " +
                "This may give untrusted content access to user cookies")

        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        locale.setlocale(locale.LC_ALL, self.locale)

        # Pre-calculate ratelimit values
        self.RL_RESET_SECONDS = self.config["RL_RESET_MINUTES"] * 60
        self.RL_MAX_REQS = int(self.config["RL_AVG_REQ_PER_SEC"] *
                               self.RL_RESET_SECONDS)

        self.RL_OAUTH_RESET_SECONDS = self.config["RL_OAUTH_RESET_MINUTES"] * 60
        self.RL_OAUTH_MAX_REQS = int(self.config["RL_OAUTH_AVG_REQ_PER_SEC"] *
                                     self.RL_OAUTH_RESET_SECONDS)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper, LiveConfig,
                                          LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts,
                                                  (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.secrets = fetch_secrets(self.zookeeper)
            self.throttles = LiveList(self.zookeeper,
                                      "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.optionxform = str
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.secrets = extract_secrets(parser)
            self.throttles = tuple()  # immutable since it's not real

        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(
            self.memcaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just used for @memoize results
        memoizecaches = CMemcache(
            self.memoizecaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just for srmember rels
        srmembercaches = CMemcache(
            self.srmembercaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        ratelimitcaches = CMemcache(
            self.ratelimitcaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
        )

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             min_compress_len=50 * 1024,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=480,
        )

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(
            self.pagecaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=1400,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
            StatsCollectingConnectionPool(keyspace,
                                          stats=self.stats,
                                          logging_name="main",
                                          server_list=self.cassandra_seeds,
                                          pool_size=self.cassandra_pool_size,
                                          timeout=4,
                                          max_retries=3,
                                          prefill=False),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl)

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        event.listens_for(engine.Engine, 'before_cursor_execute')(
            self.stats.pg_before_cursor_execute)
        event.listens_for(engine.Engine, 'after_cursor_execute')(
            self.stats.pg_after_cursor_execute)

        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        cache_chains = {}
        localcache_cls = (SelfEmptyingCache
                          if self.running_as_script else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        cache_chains.update(cache=self.cache)

        if stalecaches:
            self.memoizecache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memoizecaches,
            )
        else:
            self.memoizecache = MemcacheChain(
                (localcache_cls(), memoizecaches))
        cache_chains.update(memoizecache=self.memoizecache)

        if stalecaches:
            self.srmembercache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                srmembercaches,
            )
        else:
            self.srmembercache = MemcacheChain(
                (localcache_cls(), srmembercaches))
        cache_chains.update(srmembercache=self.srmembercache)

        self.ratelimitcache = MemcacheChain(
            (localcache_cls(), ratelimitcaches))
        cache_chains.update(ratelimitcaches=self.ratelimitcache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((
            localcache_cls(),
            pagecaches,
        ))
        cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(), ))
        cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them to be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.cache_chains = cache_chains

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
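
thread_dump, installed above as the SIGUSR1 handler, is defined elsewhere in r2. A minimal sketch of such a handler, assuming it simply prints every live thread's stack:

import sys
import traceback

def thread_dump(signum, frame):
    # print a stack trace for each live thread when SIGUSR1 arrives
    for thread_id, stack in sys._current_frames().items():
        print("Thread %s:" % thread_id)
        traceback.print_stack(stack)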