Example #1
    def handle(self):
        package_id = self.request.input.package_id
        server_token = self.server.fs_server_config.main.token
        lock_name = '{}{}:{}'.format(KVDB.LOCK_PACKAGE_UPLOADING, server_token,
                                     package_id)
        already_deployed_flag = '{}{}:{}'.format(
            KVDB.LOCK_PACKAGE_ALREADY_UPLOADED, server_token, package_id)

        # TODO: Stuff below - and the methods used - needs to be rectified.
        # As of now any worker process will always set deployment status
        # to DEPLOYMENT_STATUS.DEPLOYED but what we really want is per-worker
        # reporting of whether the deployment succeeded or not.

        with Lock(lock_name, self.server.deployment_lock_expires,
                  self.server.deployment_lock_timeout, self.server.kvdb.conn):
            with closing(self.odb.session()) as session:

                # Only the first worker will get here ..
                if not self.server.kvdb.conn.get(already_deployed_flag):
                    self.backup_current_work_dir()

                    self.server.kvdb.conn.set(
                        already_deployed_flag,
                        dumps({'create_time_utc': datetime.utcnow().isoformat()}))
                    self.server.kvdb.conn.expire(
                        already_deployed_flag, self.server.deployment_lock_expires)

                # .. all workers get here.
                self.deploy_package(self.request.input.package_id, session)
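The pattern above is worth isolating: every worker serializes on the lock, but only the first one to find the flag missing performs the one-time step, while all workers perform the per-worker step. A minimal self-contained sketch, assuming a retools-style Lock(key, expires, timeout, redis) signature as in the other examples here; all other names are hypothetical:

import json
from datetime import datetime

import redis
from retools.lock import Lock

conn = redis.StrictRedis()

def run_once_then_all(package_id, one_time_step, per_worker_step, expires=60):
    flag = 'deploy:done:{}'.format(package_id)
    with Lock('deploy:lock:{}'.format(package_id), expires=expires, redis=conn):
        if not conn.get(flag):
            # Only the first worker gets here ..
            one_time_step()
            conn.set(flag, json.dumps(
                {'create_time_utc': datetime.utcnow().isoformat()}))
            conn.expire(flag, expires)
        # .. all workers get here.
        per_worker_step()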
Example #2
    def handle(self):

        # Grab a distributed lock so concurrent updates don't interfere with each other
        with Lock(UPDATE_LOCK):

            # We always use UTC
            today = str(datetime.utcnow().date())

            # Cache key under which the data for the current date and the
            # given currency pair is stored
            cache_key = RATES_PATTERN.format(self.request.input.pair)

            # Fetch currently cached value, if any has been already stored at all
            old_value = Decimal(self.kvdb.conn.hget(cache_key, today) or 0)

            # Fetch new data from the backend
            response = self.invoke(FetchRates.get_name(),
                                   {'pair': self.request.input.pair})
            new_value = Decimal(response['response']['rate'])

            # Either use the new value directly (because there wasn't any old one)
            # or find the average of old and new one
            new_value = new_value if not old_value else (old_value + new_value) / 2

            # Store the new value in cache
            self.kvdb.conn.hset(cache_key, today, new_value)
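The lock matters because hget, compute, hset is a non-atomic read-modify-write: without it, two workers could both read the old value and one update would be lost. A hedged sketch of the guarded step as a standalone helper; the connection object and key are assumed, not taken from the original:

from decimal import Decimal

def merge_rate(conn, cache_key, field, new_value):
    # Read the current value, average it with the new one, write it back.
    # Callers must hold the distributed lock around this read-modify-write.
    old_value = Decimal(conn.hget(cache_key, field) or 0)
    merged = new_value if not old_value else (old_value + new_value) / 2
    conn.hset(cache_key, field, str(merged))
    return merged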
Example #3
    def handle(self):

        # last_permitted is the earliest possible date to keep in the cache,
        # any values earlier than that one will be deleted.
        today = datetime.utcnow().date()
        last_permitted = str(today -
                             timedelta(days=int(self.request.raw_request)))

        # Grab a distributed lock to safely update the contents of the cache
        with Lock(UPDATE_LOCK):

            # Values are deleted in a single all-or-nothing transaction
            # pipeline: either all of them are deleted or none is.
            with self.kvdb.conn.pipeline() as p:

                # Find all pairs that need to be deleted and add them to pipeline
                for key in self.kvdb.conn.keys(RATES_PATTERN.format('*')):
                    for date in self.kvdb.conn.hkeys(key):
                        # ISO-formatted dates sort lexicographically, so a
                        # plain string comparison is enough here
                        if date < last_permitted:
                            p.hdel(key, date)

                            # Output message to logs so users understand
                            # there's some activity going on
                            self.logger.info('Deleting {}/{}'.format(
                                key, date))

                # Execute the whole transaction as a single unit
                p.execute()
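One caveat, not a bug fix: KEYS blocks Redis while it walks the whole keyspace. On large datasets the same loop can use redis-py's incremental scan_iter instead, a sketch under the assumption that the connection object is a standard redis-py client:

                for key in self.kvdb.conn.scan_iter(match=RATES_PATTERN.format('*')):
                    for date in self.kvdb.conn.hkeys(key):
                        if date < last_permitted:
                            p.hdel(key, date)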
Example #4
    def test_lock_fail(self):
        Lock = self._makeOne()

        # bv signals that the background thread has acquired the lock;
        # ev tells it when to release.
        bv = threading.Event()
        ev = threading.Event()

        def get_lock():
            with Lock(self.key):
                bv.set()
                ev.wait()
        t = threading.Thread(target=get_lock)
        t.start()
        ac = []

        @raises(self._lockException())
        def test_it():
            with Lock(self.key, timeout=0):
                ac.append(10)  # pragma: nocover
        # Wait until the background thread holds the lock; acquiring with
        # timeout=0 must then raise immediately.
        bv.wait()
        test_it()
        eq_(ac, [])
        ev.set()
        t.join()
        with Lock(self.key, timeout=0):
            ac.append(10)
        eq_(ac, [10])
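The same timeout behavior outside a test, hedged as a sketch: attempt a non-blocking acquire and fall back if the lock is busy. LockTimeout is the exception name used in the examples below; the rest is hypothetical:

from retools.lock import Lock, LockTimeout

def try_exclusive(key, conn, work):
    try:
        with Lock(key, timeout=0, redis=conn):
            work()
            return True
    except LockTimeout:
        # Someone else holds the lock; skip instead of blocking
        return False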
Example #5
File: delivery.py Project: xbx/zato
    def on_target_completed(self, target_type, target, delivery, start, end, target_ok, target_self_info, err_info=None):
        now = datetime.utcnow().isoformat()
        task_id = delivery['task_id']
        lock_name = '{}{}'.format(KVDB.LOCK_DELIVERY, task_id)
        total_time = str(end - start)

        with Lock(lock_name, self.delivery_lock_timeout, LOCK_TIMEOUT, self.kvdb.conn):
            entry_ctx = dumps({
                'target_self_info': target_self_info,
                'total_time': total_time
            })

            with closing(self.odb.session()) as session:
                delivery = session.merge(self.get_delivery(task_id))
                delivery.state = DELIVERY_STATE.IN_PROGRESS_TARGET_OK if target_ok else DELIVERY_STATE.IN_PROGRESS_TARGET_FAILURE
                delivery.last_used = now
                delivery.definition.last_used = now
                delivery.target_count += 1

                history = DeliveryHistory()
                history.task_id = task_id
                history.entry_type = DELIVERY_HISTORY_ENTRY.TARGET_OK if target_ok else DELIVERY_HISTORY_ENTRY.TARGET_FAILURE
                history.entry_time = now
                history.entry_ctx = entry_ctx
                history.delivery = delivery
                history.resubmit_count = delivery.resubmit_count

                session.add(delivery)
                session.add(history)

                session.commit()

                self._invoke_callbacks(target, target_type, delivery, target_ok, False, DELIVERY_CALLBACK_INVOKER.TARGET)
Example #6
def lock_factory(key,
                 host=None,
                 port=None,
                 redis_=None,
                 expires=60 * 5,
                 timeout=5):
    # Global lock used for performance-data collection; the lock may be held
    # for at most 60 * 5 seconds and acquisition times out after 5 s.
    if not redis_:
        redis_ = redis_connect(host, port)
    return Lock(key, expires=expires, timeout=timeout, redis=redis_)
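A hedged usage sketch of the factory above; collect_metrics is hypothetical, and redis_connect is assumed to come from the same module:

with lock_factory('perf:collector', host='localhost', port=6379):
    collect_metrics()  # only one collector runs at a time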
Example #7
    def check_target(self, item):
        self.logger.debug('Checking name/target/task_id [%s]', item.log_name)

        if self.is_deleted(item.def_name):
            self.logger.info('Stopping [%s] (definition.is_deleted->True)',
                             item.log_name)
            return

        now_dt = datetime.utcnow()
        now = now_dt.isoformat()
        lock_name = '{}{}'.format(KVDB.LOCK_DELIVERY, item.task_id)

        with closing(self.odb.session()) as session:
            try:
                delivery = session.merge(self.get_delivery(item.task_id))
                payload = delivery.payload.payload
            except orm_exc.NoResultFound:
                # Apparently the delivery was deleted since the last time we were scheduled to run
                self.logger.info('Stopping [%s] (NoResultFound->True)',
                                 item.log_name)
                return

        # Fetch new values because it's possible they have been changed since the last time we were invoked
        item['payload'] = payload
        item['args'] = delivery.args
        item['kwargs'] = delivery.kwargs

        with Lock(lock_name, self.delivery_lock_timeout, LOCK_TIMEOUT,
                  self.kvdb.conn):

            # Target did not reply at all hence we're entering in-doubt
            if delivery.source_count > delivery.target_count:
                self._on_in_doubt(item, delivery, now)

            else:
                # The target has confirmed the invocation within the expected
                # time, so we now need to check whether it succeeded. If it
                # did, this is where it ends; if it didn't, we'll try again
                # as originally configured, unless this was the last retry.

                target_ok = delivery.state == DELIVERY_STATE.IN_PROGRESS_TARGET_OK

                # All good, we can stop now.
                if target_ok:
                    self.finish_delivery(delivery, target_ok, now_dt, item)

                # Not so good, we know there was an error.
                else:
                    # Can we try again?
                    if delivery.source_count < item.retry_repeats:
                        self.retry(delivery, item, now)

                    # Nope, that was the last attempt.
                    else:
                        self.finish_delivery(delivery, target_ok, now_dt, item)
Example #8
    def lock(self, name=None, expires=20, timeout=10, backend=None):
        """ Creates a Redis-backed distributed lock.

        name - defaults to self.name, effectively serializing access to this service
        expires - defaults to 20 seconds and is the max time the lock will be held
        timeout - how long (in seconds) we will wait to acquire the lock before giving up and raising LockTimeout
        backend - a Redis connection object, defaults to self.kvdb.conn
        """
        name = '{}{}'.format(KVDB.LOCK_SERVICE_PREFIX, name or self.name)
        backend = backend or self.kvdb.conn
        return Lock(name, expires, timeout, backend)
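A hedged usage sketch inside a service's handle method; the work being serialized is hypothetical:

    def handle(self):
        # With no arguments the lock name defaults to self.name, so at most
        # one invocation of this service runs at a time across all workers.
        with self.lock():
            self.do_exclusive_work()  # hypothetical critical section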
Example #9
    def _start_job(self, job):
        try:
            lock = Lock(job['url'], redis=self.redis, timeout=1)
            lock.acquire()

            self.working_url = job['url']

            if self.working_url:
                self.domain_name, domain_url = get_domain_from_url(
                    self.working_url)

            self._ping_api()
            job['lock'] = lock

            return True
        except LockTimeout:
            job['lock'] = None
            return False
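Since the acquired lock is stashed on the job, the matching release presumably happens once the job finishes; a minimal sketch with a hypothetical method name:

    def _complete_job(self, job):
        # Release the per-URL lock acquired in _start_job, if any.
        if job.get('lock') is not None:
            job['lock'].release()
            job['lock'] = None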
Example #10
File: cache.py Project: 0x1997/retools
    def load(cls,
             region,
             namespace,
             key,
             regenerate=True,
             callable=None,
             statistics=None):
        """Load a value from Redis, and possibly recreate it

        This method is used to load a value from Redis, and usually
        regenerates the value using the callable when provided.

        If ``regenerate`` is ``False`` and a ``callable`` is not passed
        in, then :obj:`~retools.cache.NoneMarker` will be returned.

        :param region: Region name
        :type region: string
        :param namespace: Namespace for the value
        :type namespace: string
        :param key: Key for this value under the namespace
        :type key: string
        :param regenerate: If False, then existing keys will always be
                           returned regardless of cache expiration. In the
                           event that there is no existing key and no
                           callable was provided, then a NoneMarker will
                           be returned.
        :type regenerate: bool
        :param callable: A callable to use when the cached value needs to be
                         created
        :param statistics: Whether or not hit/miss statistics should be
                           updated
        :type statistics: bool

        """
        if statistics is None:
            statistics = cls.statistics
        redis = global_connection.redis
        now = time.time()
        region_settings = cls.regions[region]
        expires = region_settings['expires']
        redis_expiration = region_settings['redis_expiration']

        keys = CacheKey(region=region, namespace=namespace, key=key)

        # Create a transaction to update our hit counter for today and
        # retrieve the current value.
        if statistics:
            p = redis.pipeline(transaction=True)
            p.hgetall(keys.redis_key)
            p.get(keys.redis_hit_key)
            p.incr(keys.redis_hit_key)
            results = p.execute()
            result, existing_hits = results[0], results[1]
            if existing_hits is None:
                existing_hits = 0
            else:
                existing_hits = int(existing_hits)
        else:
            result = redis.hgetall(keys.redis_key)

        expired = True
        if result and now - float(result['created']) < expires:
            expired = False

        if (result and not regenerate) or not expired:
            # Either we have a result and were told not to regenerate, in
            # which case we return it regardless of expiration, or the
            # result simply hasn't expired yet
            return cPickle.loads(result['value'])

        if not result and not regenerate:
            # No existing value, but we were told not to regenerate it and
            # there's no callable, so we return a NoneMarker
            return NoneMarker

        # Don't wait for the lock if we have an old value
        if result and 'value' in result:
            timeout = 0
        else:
            timeout = 60 * 60

        try:
            with Lock(keys.lock_key, expires=expires, timeout=timeout):
                # Did someone else already create it?
                result = redis.hgetall(keys.redis_key)
                now = time.time()
                if result and 'value' in result and \
                   now - float(result['created']) < expires:
                    return cPickle.loads(result['value'])

                value = callable()

                p = redis.pipeline(transaction=True)
                p.hmset(keys.redis_key, {
                    'created': now,
                    'value': cPickle.dumps(value, -1)
                })
                p.expire(keys.redis_key, redis_expiration)
                cls._add_tracking(p, region, namespace, key)
                if statistics:
                    p.getset(keys.redis_hit_key, 0)
                    new_hits = int(p.execute()[0])
                else:
                    p.execute()
        except LockTimeout:
            if result:
                return cPickle.loads(result['value'])
            else:
                # log some sort of error?
                return NoneMarker

        # Nothing else to do if not recording stats
        if not statistics:
            return value

        misses = new_hits - existing_hits
        if misses:
            p = redis.pipeline(transaction=True)
            p.incr(keys.redis_hit_key, amount=existing_hits)
            p.incr(keys.redis_miss_key, amount=misses)
            p.execute()
        else:
            redis.incr(keys.redis_hit_key, amount=existing_hits)
        return value
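For contrast, the usual entry point is the decorator built on top of load; a sketch following the retools README, with a hypothetical function body:

from retools.cache import CacheRegion, cache_region

CacheRegion.add_region('short_term', expires=60)

@cache_region('short_term')
def get_rate(pair):
    return fetch_rate_from_backend(pair)  # hypothetical expensive call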
Example #11
    def fill_job_bucket(self,
                        expiration,
                        look_ahead_pages=1000,
                        avg_links_per_page=10.0):
        try:
            with Lock('next-job-fill-bucket-lock', redis=self.redis):
                logging.info('Refilling job bucket. Lock acquired...')
                expired_time = datetime.utcnow() - timedelta(
                    seconds=expiration)

                active_domains = Domain.get_active_domains(self.db)

                if not active_domains:
                    return

                active_domains_ids = [item.id for item in active_domains]

                limiter_buckets = self.get_limiter_buckets(
                    active_domains, avg_links_per_page)

                all_domains_pages_in_need_of_review = []

                for domain_id in active_domains_ids:
                    pages = self.db \
                        .query(
                            Page.uuid,
                            Page.url,
                            Page.score,
                            Page.last_review_date
                        ) \
                        .filter(Page.domain_id == domain_id) \
                        .filter(or_(
                            Page.last_review_date == None,
                            Page.last_review_date <= expired_time
                        ))[:look_ahead_pages]

                    if pages:
                        all_domains_pages_in_need_of_review.append(pages)

                logging.debug(
                    'Total of %d pages found to add to redis.' % (sum([
                        len(item)
                        for item in all_domains_pages_in_need_of_review
                    ])))

                item_count = int(self.redis.zcard('next-job-bucket'))
                current_domain = 0
                while (item_count < look_ahead_pages
                       and all_domains_pages_in_need_of_review):
                    if current_domain >= len(all_domains_pages_in_need_of_review):
                        current_domain = 0

                    item = all_domains_pages_in_need_of_review[current_domain].pop(0)

                    has_limit = True
                    logging.debug('Available Limit Buckets: %s' % limiter_buckets)
                    for index, (limit, available) in enumerate(limiter_buckets):
                        if limit.matches(item.url):
                            if available <= 0:
                                has_limit = False
                                break
                            limiter_buckets[index] = (limit, available - 1)

                    if has_limit:
                        self.add_next_job_bucket(item.uuid, item.url)
                        item_count += 1

                    # if there are no more pages in this domain, remove it from the list
                    if not all_domains_pages_in_need_of_review[current_domain]:
                        del all_domains_pages_in_need_of_review[current_domain]

                    current_domain += 1

                logging.debug('ADDED A TOTAL of %d ITEMS TO REDIS...' %
                              item_count)

        except LockTimeout:
            logging.info("Can't acquire lock. Moving on...")
Example #12
def credit_store(store):
    """
    :return: Number of download transactions credited
    """

    credited = 0

    if not store.email:
        logger.info("Store lacks email %s", store.name)
        return 0

    if not store.btc_address:
        logger.info("Store lacks BTC address %s", store.name)
        return 0

    # No Py3k compatibility
    #if not bitcoinaddress.validate(store.btc_address):
    #    logger.error("Store %s not valid BTC address %s", store.name, store.btc_address)
    #    return 0

    logger.debug("Starting to credit store %s", store.name)

    # Additional protection: a distributed lock ensures we never accidentally
    # run two of these credit runs in parallel
    with Lock("credit_store_%d" % store.id):

        # Split into two separate db transactions to minimize the risk of
        # getting the blockchain and db out of sync because of mail errors
        # and such

        # Which of the transactions we have not yet credited
        uncredited_transaction_ids = []

        with transaction.atomic():
            uncredited_transactions = store.downloadtransaction_set.filter(credited_at__isnull=True, btc_received_at__isnull=False)

            uncredited_transaction_ids = list(uncredited_transactions.values_list("id", flat=True))

            sums = uncredited_transactions.aggregate(Sum('btc_amount'))
            total = sums.get("btc_amount__sum") or Decimal("0")

            if total == 0:
                logger.info("Store %s no transactions to credit", store.name)
                return 0

            credit_transactions(store, uncredited_transactions)

        # Reload after tx commit
        uncredited_transactions = store.downloadtransaction_set.filter(id__in=uncredited_transaction_ids)

        # Archive addresses as used when blockchain.info backend is enabled
        # if uncredited_transactions.count() > 0:
        #    if uncredited_transactions[0].payment_source == models.DownloadTransaction.PAYMENT_SOURCE_BLOCKCHAIN:
        #        blockchain.archive(uncredited_transactions.values_list("btc_address", flat=True))

        emailer.mail_store_owner(store, "{} payments".format(settings.SITE_NAME), "email/credit_transactions.html", dict(store=store, site_name=settings.SITE_NAME, transactions=uncredited_transactions))

        credited += uncredited_transactions.count()

        logger.debug("Credited %d transcations", credited)

    return credited
Example #13
    def test_it():
        with Lock(self.key, timeout=1):
            ac.append(10)  # pragma: nocover
Example #14
    def test_lock_runs(self):
        Lock = self._makeOne()
        x = 0
        with Lock(self.key):
            x += 1