Code example #1
File: synchronize.py Project: ELVIS-Project/music21
    def __init__(self, kind, value, maxvalue):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        for i in range(100):
            try:
                self._semlock = _SemLock(
                    kind, value, maxvalue, SemLock._make_name(),
                    unlink_now)
            except FileExistsError:  # pragma: no cover
                pass
            else:
                break
        else:  # pragma: no cover
            raise FileExistsError('cannot find name for semaphore')

        util.debug('created semlock with handle %s and name "%s"'
                   % (self._semlock.handle, self._semlock.name))

        self._make_methods()

        def _after_fork(obj):
            obj._semlock._after_fork()

        util.register_after_fork(self, _after_fork)

        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        semaphore_tracker.register(self._semlock.name)
        util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                      exitpriority=0)
Code example #2
File: queues.py Project: MartinThoma/scikit-learn
    def __init__(self, maxsize=0, reducers=None, ctx=None):

        if sys.version_info[:2] >= (3, 4):
            super().__init__(maxsize=maxsize, ctx=ctx)
        else:
            if maxsize <= 0:
                # Can raise ImportError (see issues #3770 and #23400)
                maxsize = SEM_VALUE_MAX
            if ctx is None:
                ctx = get_context()
            self._maxsize = maxsize
            self._reader, self._writer = connection.Pipe(duplex=False)
            self._rlock = ctx.Lock()
            self._opid = os.getpid()
            if sys.platform == 'win32':
                self._wlock = None
            else:
                self._wlock = ctx.Lock()
            self._sem = ctx.BoundedSemaphore(maxsize)

            # For use by concurrent.futures
            self._ignore_epipe = False

            self._after_fork()

            if sys.platform != 'win32':
                util.register_after_fork(self, Queue._after_fork)

        self._reducers = reducers
Code example #3
File: database.py Project: saganshul/owtf
    def create_engine(self, base):
        """Create the SQLAlchemy engine with parameters

        :return: the configured engine
        :rtype: `sqlalchemy.engine.Engine`
        """
        try:
            engine = create_engine(
                "postgresql+psycopg2://%s:%s@%s:%s/%s" % (
                    self._db_settings['DATABASE_USER'],
                    self._db_settings['DATABASE_PASS'],
                    self._db_settings['DATABASE_IP'],
                    self._db_settings['DATABASE_PORT'],
                    self._db_settings['DATABASE_NAME']),
                poolclass=QueuePool,
                pool_size=5,
                max_overflow=10)
            base.metadata.create_all(engine)
            # Fix for forking
            register_after_fork(engine, engine.dispose)
            return engine
        except ValueError as e:  # Potentially corrupted DB config.
            self.error_handler.abort_framework(
                "Database configuration file is potentially corrupted. Please check %s\n[DB] %s" %
                (self.config.get_val('DATABASE_SETTINGS_FILE'), str(e)))
        except KeyError:  # Indicates incomplete db config file
            self.error_handler.abort_framework("Incomplete database configuration settings in %s" %
                                               self.config.get_val('DATABASE_SETTINGS_FILE'))
        except exc.OperationalError as e:
            self.error_handler.abort_framework("[DB] %s\nRun 'make db-run' to start/setup db" % str(e))
Code example #4
File: db.py Project: DePierre/owtf
    def CreateEngine(self, BaseClass):
        try:
            engine = create_engine(
                "postgresql+psycopg2://{0}:{1}@{2}:{3}/{4}".format(
                    self._db_settings['DATABASE_USER'],
                    self._db_settings['DATABASE_PASS'],
                    self._db_settings['DATABASE_IP'],
                    self._db_settings['DATABASE_PORT'],
                    self._db_settings['DATABASE_NAME']),
                poolclass=QueuePool,
                pool_size=5,
                max_overflow=10,)
            BaseClass.metadata.create_all(engine)

            # Fix for forking
            register_after_fork(engine, engine.dispose)

            return engine
        except ValueError as e:  # Potentially corrupted DB config.
            self.error_handler.FrameworkAbort(
                'Database configuration file is potentially corrupted. '
                'Please check ' + self.config.FrameworkConfigGet('DATABASE_SETTINGS_FILE') + '\n'
                '[DB] ' + str(e))
        except KeyError:  # Indicates incomplete db config file
            self.error_handler.FrameworkAbort(
                "Incomplete database configuration settings in "
                "" + self.config.FrameworkConfigGet('DATABASE_SETTINGS_FILE'))
        except exc.OperationalError as e:
            self.error_handler.FrameworkAbort(
                "[DB] " + str(e) + "\nRun scripts/db_run.sh to start/setup db")
Code example #5
File: cron.py Project: ISPM/ispm-django-chronograph
    def handle(self, *args, **options):

        def _after_fork(connection):
            #to avoid the postgres multiprocess problem when forking
            connection.close()
            connection.transaction_state = []
            connection.savepoint_state = 0
            connection._dirty = None
            connection._thread_ident = thread.get_ident()

        from django import db
        register_after_fork(db.connection, _after_fork)

        from chronograph.models import Job
        procs = []
        for job in Job.objects.due():
            if not job.check_is_running():
                # Only run the Job if it isn't already running
                proc = JobProcess(job)
                proc.start()
                procs.append(proc)
        
        logger.info("%d Jobs are due" % len(procs))
        
        # Keep looping until all jobs are done
        while procs:
            for i in range(len(procs)):
                if not procs[i].is_alive():
                    procs.pop(i)
                    break
                time.sleep(.1)
Code example #6
File: application.py Project: guidow/pyfarm-master
def get_sqlalchemy(app=None, use_native_unicode=True, session_options=None):
    """
    Constructs and returns an instance of :class:`.SQLAlchemy`.  Any keyword
    arguments provided will be passed to the constructor of :class:`.SQLAlchemy`
    """
    db = SQLAlchemy(
        app=app, use_native_unicode=use_native_unicode,
        session_options=session_options)

    # sqlite specific configuration for development
    if db.engine.name == "sqlite":
        @event.listens_for(Engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON")
            cursor.execute("PRAGMA synchronous=OFF")
            cursor.execute("PRAGMA journal_mode=MEMORY")
            cursor.close()

    # When the web application is forked any existing connections
    # need to be disposed of.  This generally only seems to be a problem
    # with Postgres, more specifically psycopg2, but doing this globally
    # should not have any ill effects.  This problem was discovered while
    # testing the Agent using uwsgi 2.0.3, nginx 1.4.6, Postgres 9.1, and
    # psycopg2 2.5.2.  The bug does not present itself 100% of the time
    # making it difficult to test reliably.  The fix below is based
    # on a fix made to Celery which had the exact same problem ours did:
    #   https://github.com/celery/celery/issues/1564
    #
    # This implementation however is based on the suggestion made in Celery
    # 3.1's release notes:
    #    https://celery.readthedocs.org/en/latest/whatsnew-3.1.html
    register_after_fork(db.engine, db.engine.dispose)

    return db
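The comment block in this example describes the general contract of `multiprocessing.util.register_after_fork`: the registered callback is called with the registered object, in the child process, right after a fork performed by `multiprocessing`. A small self-contained sketch (illustrative only, not from pyfarm-master; it assumes the Unix `fork` start method, since the hook never fires under `spawn`):

import multiprocessing
from multiprocessing import util

class Resource:
    def __init__(self):
        self.connected = True
        # in every forked child, drop the state inherited from the parent
        util.register_after_fork(self, Resource._reset_after_fork)

    def _reset_after_fork(self):
        # called in the child as func(obj), i.e. with the registered object
        self.connected = False

def child(resource):
    print("child sees connected =", resource.connected)    # False

if __name__ == "__main__":
    multiprocessing.set_start_method("fork")    # Unix only
    r = Resource()
    p = multiprocessing.Process(target=child, args=(r,))
    p.start()
    p.join()
    print("parent still sees connected =", r.connected)    # True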
Code example #7
    def __init__(self, token, serializer, manager = None, authkey = None, exposed = None, incref = True):
        BaseProxy._mutex.acquire()
        try:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = (util.ForkAwareLocal(), ProcessLocalSet())
                BaseProxy._address_to_local[token.address] = tls_idset
        finally:
            BaseProxy._mutex.release()

        self._tls = tls_idset[0]
        self._idset = tls_idset[1]
        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]
        if authkey is not None:
            self._authkey = AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = current_process().authkey
        if incref:
            self._incref()
        util.register_after_fork(self, BaseProxy._after_fork)
Code example #8
File: managers.py Project: dano/aioprocessing
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._threaded_acquire = False

        def _after_fork(obj):
            obj._threaded_acquire = False
        register_after_fork(self, _after_fork)
Code example #9
File: session.py Project: Aliced3645/celery
def get_engine(dburi, **kwargs):
    try:
        return _ENGINES[dburi]
    except KeyError:
        engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
        after_fork.registered = True
        register_after_fork(after_fork, after_fork)
        return engine
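The `after_fork` name in this snippet is a module-level callable used both as the object being registered and as the callback (`register_after_fork(obj, func)` calls `func(obj)` in the child); the `registered` flag simply guards against registering it more than once. A hypothetical sketch of such a callable, assumed for illustration rather than taken from Celery:

_ENGINES = {}

def after_fork(*args):
    # runs in the forked child: drop engines cached by the parent so each
    # worker process builds its own database connections
    _ENGINES.clear()

after_fork.registered = False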
Code example #10
File: amqp.py Project: pcardune/celery
 def pool(self):
     if self._pool is None:
         self._pool = self.app.broker_connection().Pool(self.connection_max)
         try:
             from multiprocessing.util import register_after_fork
             register_after_fork(self, self._reset_after_fork)
         except ImportError:
             pass
     return self._pool
Code example #11
 def __init__(self):
     self._key = 0
     self._cache = {}
     self._old_locks = []
     self._lock = threading.Lock()
     self._listener = None
     self._address = None
     self._thread = None
     register_after_fork(self, ResourceSharer._afterfork)
Code example #12
File: synchronize.py Project: 1310701102/sl4a
    def __init__(self, kind, value, maxvalue):
        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
        debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            register_after_fork(self, _after_fork)
Code example #13
File: base.py Project: KarimAllah/celery
 def get_pool(self, next_connection_pool=False):
     if (self._pool is None) or next_connection_pool:
         try:
             from multiprocessing.util import register_after_fork
             register_after_fork(self, self._after_fork)
         except ImportError:
             pass
         limit = self.conf.BROKER_POOL_LIMIT
         self._pool = self.broker_connection(next_connection=next_connection_pool).Pool(limit)
     return self._pool
Code example #14
File: base.py Project: sunliwen/celery
 def pool(self):
     if self._pool is None:
         try:
             from multiprocessing.util import register_after_fork
             register_after_fork(self, self._after_fork)
         except ImportError:
             pass
         self._pool = self.broker_connection().Pool(
                         limit=self.conf.BROKER_POOL_LIMIT)
     return self._pool
Code example #15
File: tasks.py Project: ferewuz/balistos
def bootstrap_pyramid(signal, sender):
    import os
    from pyramid.paster import bootstrap
    sender.app.settings = \
        bootstrap(os.environ['BALISTOS_CONFIG'])['registry'].settings
    engine = engine_from_config(sender.app.settings, 'sqlalchemy.')

    register_after_fork(engine, engine.dispose)

    Session.configure(bind=engine)
Code example #16
    def __init__(self, kind, value, maxvalue):
        sl = self._semlock = js.eval('new Semaphore(1);')
        sl.kind=kind
        sl.value=0
        sl.maxvalue=maxvalue
        debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            register_after_fork(self, _after_fork)
Code example #17
File: log.py Project: nEDM-TUM/HimbeereCouch
    def __init__(self, out_file = None):
        logging.Handler.__init__(self)

        if out_file is not None:
            self._handler = FH(out_file)
        else:
            self._handler = SH()
        self.queue = multiprocessing.Queue(-1)

        atexit.register(logging.shutdown)
        self._thrd = None
        self._is_child = False

        # Children will automatically register themselves as children
        register_after_fork(self, MPLogHandler.set_is_child)
Code example #18
 def __init__(self, maxsize = 0):
     if maxsize <= 0:
         maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
     self._maxsize = maxsize
     self._reader, self._writer = Pipe(duplex=False)
     self._rlock = Lock()
     self._opid = os.getpid()
     if sys.platform == 'win32':
         self._wlock = None
     else:
         self._wlock = Lock()
     self._sem = BoundedSemaphore(maxsize)
     self._after_fork()
     if sys.platform != 'win32':
         register_after_fork(self, Queue._after_fork)
Code example #19
File: database.py Project: SPRIME01/orb
    def connect(self):
        """
        Creates the backend instance for this database and connects it to its
        database server.
        
        :sa         backend
        
        :return     <bool> | success
        """
        backend = self.backend()
        if backend:
            # disconnect after a multiprocess fork or this will error out
            register_after_fork(self, self.disconnect)

            return backend.open()
        return False
Code example #20
File: celery.py Project: debon/abilian-core
  def flask_app(self):
    if has_app_context():
      return flask_current_app._get_current_object()

    self.flask_app_factory = symbol_by_name(self.flask_app_factory)
    app = self.flask_app_factory()

    if 'sentry' in app.extensions:
      from raven.contrib.celery import register_signal, register_logger_signal
      client = app.extensions['sentry'].client
      client.tags['process_type'] = 'celery task'
      register_signal(client)
      register_logger_signal(client)

    register_after_fork(app, self._setup_after_fork)
    return app
Code example #21
File: celery.py Project: abilian/abilian-core
    def flask_app(self):
        if has_app_context():
            return unwrap(flask_current_app)

        self.flask_app_factory = symbol_by_name(self.flask_app_factory)
        app = self.flask_app_factory()

        if "sentry" in app.extensions:
            from raven.contrib.celery import register_signal, register_logger_signal

            client = app.extensions["sentry"].client
            client.tags["process_type"] = "celery task"
            register_signal(client)
            register_logger_signal(client)

        register_after_fork(app, self._setup_after_fork)
        return app
Code example #22
File: queues.py Project: perkinslr/pypyjs-release
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = 2147483647L
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
Code example #23
File: __init__.py Project: jstacoder/oz
def engine():
    global _engine

    if _engine is None:
        kwargs = dict(echo=oz.settings["debug_sql"])

        if oz.settings["db_pool_size"]:
            kwargs["pool_size"] = oz.settings["db_pool_size"]
        if oz.settings["db_max_overflow"]:
            kwargs["max_overflow"] = oz.settings["db_max_overflow"]
        if oz.settings["db_pool_timeout"]:
            kwargs["pool_timeout"] = oz.settings["db_pool_timeout"]

        _engine = create_engine(oz.settings["db"], **kwargs)
        after_fork.registered = True
        register_after_fork(after_fork, after_fork)

    return _engine
Code example #24
File: shmem_sync.py Project: sdiehl/numpush
    def __init__(self, maxreaders=120):
        # Linux max semaphore sets is 120
        self.max = 120
        self._reader = Semaphore(120)
        self._writer = Semaphore(1)
        self._sleeping = Event()

        # Does this process hold the write?
        self.localwrite = False
        self.thread_id = currentThread()

        self.create_methods()

        def after_fork(obj):
            obj._reader._after_fork()
            obj._writer._after_fork()
            obj._sleeping._after_fork()

        register_after_fork(self, after_fork)
Code example #25
def get_engine():
    ## method 1: local postgres database
    # engine = create_engine(URL(**LOCAL_PSQL_DATABASE))

    ## method 1.1: local postgres
    # engine = create_engine(local_psql_db_2)

    ## method 2: local sqlite database
    engine = create_engine(local_sqlite)

    ## method 3: database on AWS Server
    # engine = create_engine(postgresql_conn_aws)

    # register the engine so its connections are disposed after a fork
    register_after_fork(engine, engine.dispose)

    Base.metadata.create_all(engine)

    return engine
Code example #26
File: __init__.py Project: dailymuse/oz
def engine(connection_string=None):
    global _engines
    connection_string = connection_string or oz.settings["db"]

    if connection_string not in _engines:
        kwargs = dict(echo=oz.settings["debug_sql"])

        if oz.settings["db_pool_size"]:
            kwargs["pool_size"] = oz.settings["db_pool_size"]
        if oz.settings["db_max_overflow"]:
            kwargs["max_overflow"] = oz.settings["db_max_overflow"]
        if oz.settings["db_pool_timeout"]:
            kwargs["pool_timeout"] = oz.settings["db_pool_timeout"]

        first_engine = len(_engines) == 0
        _engines[connection_string] = create_engine(connection_string, **kwargs)

        if first_engine:
            after_fork.registered = True
            register_after_fork(after_fork, after_fork)

    return _engines[connection_string]
Code example #27
    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True):
        BaseProxy._mutex.acquire()
        try:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset
        finally:
            BaseProxy._mutex.release()

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        if authkey is not None:
            self._authkey = AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)
Code example #28
 def __init__(self):
     self.limit = 128
     self._after_fork()
     register_after_fork(self, LRUSharedCache._after_fork)
Code example #29
    def __init__(self, app_name, *args, **kwargs):
        ADSCelery.__init__(self, app_name, *args, **kwargs)
        # this is used for bulk/efficient updates to metrics db
        self._metrics_engine = self._metrics_session = None
        if self._config.get('METRICS_SQLALCHEMY_URL', None):
            self._metrics_engine = create_engine(
                self._config.get('METRICS_SQLALCHEMY_URL', 'sqlite:///'),
                echo=self._config.get('SQLALCHEMY_ECHO', False))
            _msession_factory = sessionmaker()
            self._metrics_session = scoped_session(_msession_factory)
            self._metrics_session.configure(bind=self._metrics_engine)

            MetricsBase.metadata.bind = self._metrics_engine
            self._metrics_table = Table('metrics',
                                        MetricsBase.metadata,
                                        autoload=True,
                                        autoload_with=self._metrics_engine)
            register_after_fork(self._metrics_engine,
                                self._metrics_engine.dispose)

            insert_columns = {
                'an_refereed_citations':
                bindparam('an_refereed_citations', required=False),
                'an_citations':
                bindparam('an_citations', required=False),
                'author_num':
                bindparam('author_num', required=False),
                'bibcode':
                bindparam('bibcode'),
                'citations':
                bindparam('citations', required=False),
                'citation_num':
                bindparam('citation_num', required=False),
                'downloads':
                bindparam('downloads', required=False),
                'reads':
                bindparam('reads', required=False),
                'refereed':
                bindparam('refereed', required=False, value=False),
                'refereed_citations':
                bindparam('refereed_citations', required=False),
                'refereed_citation_num':
                bindparam('refereed_citation_num', required=False),
                'reference_num':
                bindparam('reference_num', required=False),
                'rn_citations':
                bindparam('rn_citations', required=False),
                'rn_citation_data':
                bindparam('rn_citation_data', required=False),
            }
            self._metrics_table_upsert = insert(MetricsModel).values(
                insert_columns)
            # on insert conflict we specify which columns update
            update_columns = {
                'an_refereed_citations':
                getattr(self._metrics_table_upsert.excluded,
                        'an_refereed_citations'),
                'an_citations':
                getattr(self._metrics_table_upsert.excluded, 'an_citations'),
                'author_num':
                getattr(self._metrics_table_upsert.excluded, 'author_num'),
                'citations':
                getattr(self._metrics_table_upsert.excluded, 'citations'),
                'citation_num':
                getattr(self._metrics_table_upsert.excluded, 'citation_num'),
                'downloads':
                getattr(self._metrics_table_upsert.excluded, 'downloads'),
                'reads':
                getattr(self._metrics_table_upsert.excluded, 'reads'),
                'refereed':
                getattr(self._metrics_table_upsert.excluded, 'refereed'),
                'refereed_citations':
                getattr(self._metrics_table_upsert.excluded,
                        'refereed_citations'),
                'refereed_citation_num':
                getattr(self._metrics_table_upsert.excluded,
                        'refereed_citation_num'),
                'reference_num':
                getattr(self._metrics_table_upsert.excluded, 'reference_num'),
                'rn_citations':
                getattr(self._metrics_table_upsert.excluded, 'rn_citations'),
                'rn_citation_data':
                getattr(self._metrics_table_upsert.excluded,
                        'rn_citation_data')
            }
            self._metrics_table_upsert = self._metrics_table_upsert.on_conflict_do_update(
                index_elements=['bibcode'], set_=update_columns)
Code example #30
 def __init__(self):
     util.register_after_fork(self, lambda obj: obj.clear())
Code example #31
File: pools.py Project: FluidInc/8b-kombu
    limit = limit or 0
    glimit = _limit[0] or 0
    if limit < glimit:
        if not disable_limit_protection and (_used[0] and not force):
            raise RuntimeError("Can't lower limit after pool in use.")
        reset_after = True
    if limit != glimit:
        _limit[0] = limit
        for pool in _all_pools():
            pool.limit = limit
        if reset_after:
            reset()
    return limit


def reset(*args, **kwargs):
    for pool in _all_pools():
        try:
            pool.force_close_all()
        except Exception:
            pass
    for group in _groups:
        group.clear()
    _used[0] = False

try:
    from multiprocessing.util import register_after_fork
    register_after_fork(connections, reset)
except ImportError:  # pragma: no cover
    pass
Code example #32
File: sftp.py Project: ducharmemp/slowcooker
import os
import paramiko
from multiprocessing.util import register_after_fork

from source.config import CONFIG

t = paramiko.Transport((CONFIG['sftp']['transport']['hostname'],
                        CONFIG['sftp']['transport']['port']))
t.connect(**CONFIG['sftp']['auth'])

if not hasattr(os, 'register_at_fork'):
    register_after_fork(t, paramiko.Transport.atfork)
else:
    # os.register_at_fork takes keyword-only arguments
    os.register_at_fork(after_in_child=t.atfork)

sftp = paramiko.SFTPClient.from_transport(t)
Code example #33
File: reduction.py Project: webiumsk/WOT-0.9.15-CT
def _reset(obj):
    global _cache
    global _listener
    global _lock
    for h in _cache:
        close(h)

    _cache.clear()
    _lock = threading.Lock()
    _listener = None
    return


_reset(None)
register_after_fork(_reset, _reset)

def _get_listener():
    global _listener
    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()
Code example #34
    def __init__(self, app_name, *args, **kwargs):
        """
        :param: app_name - string, name of the application (can be anything)
        :keyword: local_config - dict, configuration that should be applied
            over the default config (that is loaded from config.py and local_config.py)
        """
        proj_home = None
        if 'proj_home' in kwargs:
            proj_home = kwargs.pop('proj_home')
        self._config = load_config(extra_frames=1,
                                   proj_home=proj_home,
                                   app_name=app_name)

        local_config = None
        if 'local_config' in kwargs and kwargs['local_config']:
            local_config = kwargs.pop('local_config')
            self._config.update(local_config)  #our config
        if not proj_home:
            proj_home = self._config.get('PROJ_HOME', None)
        self.logger = setup_logging(
            app_name,
            proj_home=proj_home,
            level=self._config.get('LOGGING_LEVEL', 'INFO'),
            attach_stdout=self._config.get('LOG_STDOUT', False))

        # make sure that few important params are set for celery
        if 'broker' not in kwargs:
            kwargs['broker'] = self._config.get('CELERY_BROKER', 'pyamqp://')
        if 'include' not in kwargs:
            cm = None
            if 'CELERY_INCLUDE' not in self._config:
                cm = self._get_callers_module()
                parts = cm.split('.')
                parts[-1] = 'tasks'
                cm = '.'.join(parts)
                if '.tasks' not in cm:
                    self.logger.debug(
                        'It seems like you are not importing from \'.tasks\': %s',
                        cm)
                self.logger.warn(
                    'CELERY_INCLUDE is empty, we have to guess it (correct???): %s',
                    cm)
            kwargs['include'] = self._config.get('CELERY_INCLUDE', [cm])

        Celery.__init__(self, *args, **kwargs)
        self._set_serializer()

        self.conf.update(
            self._config
        )  #celery's config (devs should be careful to avoid clashes)

        self._engine = self._session = None
        if self._config.get('SQLALCHEMY_URL', None):
            self._engine = create_engine(
                self._config.get('SQLALCHEMY_URL', 'sqlite:///'),
                echo=self._config.get('SQLALCHEMY_ECHO', False))
            self._session_factory = sessionmaker()
            self._session = scoped_session(self._session_factory)
            self._session.configure(bind=self._engine)
            register_after_fork(self._engine, self._engine.dispose)

        if self._config.get('CELERY_DEFAULT_EXCHANGE_TYPE',
                            'topic') != 'topic':
            self.logger.warn('The exchange type is not "topic" - ' \
                             'are you sure CELERY_DEFAULT_EXCHANGE_TYPE is set properly? (%s)',
                             self._config.get('CELERY_DEFAULT_EXCHANGE_TYPE', ''))

        self.exchange = Exchange(self._config.get('CELERY_DEFAULT_EXCHANGE',
                                                  'ads-pipeline'),
                                 type=self._config.get(
                                     'CELERY_DEFAULT_EXCHANGE_TYPE', 'topic'))

        self.forwarding_connection = None
        if self._config.get('OUTPUT_CELERY_BROKER', None):
            # kombu connection is lazy loaded, so it's ok to create now
            self.forwarding_connection = BrokerConnection(
                self._config['OUTPUT_CELERY_BROKER'])

            if self.conf.get('OUTPUT_TASKNAME', None):

                @self.task(name=self._config['OUTPUT_TASKNAME'],
                           exchange=self._config.get('OUTPUT_EXCHANGE',
                                                     'ads-pipeline'),
                           queue=self._config.get('OUTPUT_QUEUE',
                                                  'update-record'),
                           routing_key=self._config.get(
                               'OUTPUT_QUEUE', 'update-record'))
                def _forward_message(self, *args, **kwargs):
                    """A handler that can be used to forward stuff out of our
                    queue. It does nothing (it doesn't process data)"""
                    self.logger.error('We should have never been called directly! %s %s' %
                                      (args, kwargs))

                self._forward_message = _forward_message
Code example #35
    def _register(func):
        def wrapper(arg):
            func()

        _util.register_after_fork(_register, wrapper)
Code example #36
import os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from multiprocessing.util import register_after_fork

engine = create_engine(
    "postgresql+psycopg2://postgres:sesame@localhost:5432/rentio")
register_after_fork(engine, engine.dispose)

Base = declarative_base()
Session = sessionmaker(bind=engine)

session = Session()
Code example #37
app.config["COMPRESS_DEBUG"] = os.getenv("COMPRESS_DEBUG", "False") == "True"

# setup cache
cache = Cache(app, config={'CACHE_TYPE': 'simple'})

# so you can fake PATCH support (http://flask.pocoo.org/docs/patterns/methodoverrides/)
app.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app)

# database stuff
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_POOL_SIZE"] = 60

db = SQLAlchemy(app)

# see https://github.com/celery/celery/issues/1564
register_after_fork(db.engine, db.engine.dispose)


# from http://docs.sqlalchemy.org/en/latest/core/pooling.html
# This recipe will ensure that a new Connection will succeed even if connections in the pool
# have gone stale, provided that the database server is actually running.
# The expense is that of an additional execution performed per checkout
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except:
        # optional - dispose the whole pool
        # instead of invalidating one at a time
        # connection_proxy._pool.dispose()
        # per the SQLAlchemy pooling recipe linked above, raise DisconnectionError
        # so the pool retries the connection (assumes `from sqlalchemy import exc`)
        raise exc.DisconnectionError()
    cursor.close()
Code example #38
 def __init__(self):
     util.register_after_fork(self, lambda obj: obj.clear())
Code example #39
 def __init__(self):
     self._lock = threading.Lock()
     self.acquire = self._lock.acquire
     self.release = self._lock.release
     register_after_fork(self, ForkAwareThreadLock.__init__)
Code example #40
    limit = limit or 0
    glimit = _limit[0] or 0
    if limit < glimit:
        if not disable_limit_protection and (_used[0] and not force):
            raise RuntimeError("Can't lower limit after pool in use.")
        reset_after = True
    if limit != glimit:
        _limit[0] = limit
        for pool in _all_pools():
            pool.limit = limit
        if reset_after:
            reset()
    return limit


def reset(*args, **kwargs):
    for pool in _all_pools():
        try:
            pool.force_close_all()
        except Exception:
            pass
    for group in _groups:
        group.clear()
    _used[0] = False

try:
    from multiprocessing.util import register_after_fork
    register_after_fork(connections, reset)
except ImportError:  # pragma: no cover
    pass
Code example #41
#
# Support for a per-process server thread which caches pickled handles
#

_cache = set()

def _reset(obj):
    global _lock, _listener, _cache
    for h in _cache:
        close(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None

_reset(None)
register_after_fork(_reset, _reset)

def _get_listener():
    global _listener

    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()
Code example #42
 def __init__(self):
     register_after_fork(self, lambda obj: obj.__dict__.clear())