Example 1
 def wrapped(request, *args, **kwargs):
     datastore_write = CapabilitySet("datastore_v3", capabilities=["write"])
     datastore_writable = datastore_write.will_remain_enabled_for(60)
     if not datastore_writable:
         logging.warn("Datastore is not writable. %s" % datastore_write.admin_message())
         if not request.is_xhr:
             return redirect(url_for(endpoint))
     return view(request, *args, **kwargs)
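In this snippet `view` and `endpoint` are free variables, so what is shown is only the inner closure of a decorator factory. A minimal sketch of how that factory might be assembled is given below, assuming Flask/Werkzeug-style `redirect`/`url_for` helpers and a hypothetical `datastore_writable_required` name; none of these outer pieces appear in the original example.

import logging
from functools import wraps

from flask import redirect, url_for  # assumed framework helpers
from google.appengine.api.capabilities import CapabilitySet


def datastore_writable_required(endpoint):
    """Hypothetical decorator factory enclosing a wrapped() closure like the one above."""
    def decorator(view):
        @wraps(view)
        def wrapped(request, *args, **kwargs):
            datastore_write = CapabilitySet("datastore_v3", capabilities=["write"])
            # Redirect to a fallback endpoint when datastore writes are, or are
            # about to be, disabled.
            if not datastore_write.will_remain_enabled_for(60):
                logging.warning("Datastore is not writable. %s",
                                datastore_write.admin_message())
                if not request.is_xhr:
                    return redirect(url_for(endpoint))
            return view(request, *args, **kwargs)
        return wrapped
    return decorator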
Example 2
    def run(self, checks=DEFAULT_CAPABILITY_CHECKS):
        """
        This method is used to run the specified set of tests
        """

        # reset the availability list
        self.availability = []

        # iterate over the list of checks and execute them
        for check_instance in checks:
            # ensure that we have a title and a package
            if (KEY_TITLE not in check_instance
                    or KEY_PACKAGE not in check_instance):
                logging.warning("Invalid check defined: %s", check_instance)
                continue

            logging.debug("Running the %s check", check_instance[KEY_TITLE])

            # determine the capabilities we are looking for
            caps = []
            if KEY_CAPABILITIES in check_instance:
                caps = check_instance[KEY_CAPABILITIES]

            # create the capability set instance
            capset = CapabilitySet(check_instance[KEY_PACKAGE], caps, ['*'])

            # create the service availability record
            service_avail = {
                'title': check_instance[KEY_TITLE],

                # determine whether the service is available now
                'avail_now': capset.is_enabled(),

                # determine whether the service will still be available in one hour
                'avail_hour': capset.will_remain_enabled_for(3600),

                # determine whether the service will still be available in one day
                'avail_day': capset.will_remain_enabled_for(86400),
            }

            # log the results
            logging.debug("Completed availability check, results below\n%s",
                          service_avail)

            # add the availability to the service list
            self.availability.append(service_avail)
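The run method assumes several module-level constants that are not shown in the example. A plausible reconstruction, inferred purely from how they are used above, might look like this; the exact key strings and the contents of DEFAULT_CAPABILITY_CHECKS are assumptions:

# Hypothetical constants inferred from run(); the real values may differ.
KEY_TITLE = 'title'
KEY_PACKAGE = 'package'
KEY_CAPABILITIES = 'capabilities'

DEFAULT_CAPABILITY_CHECKS = [
    {KEY_TITLE: 'Datastore writes',
     KEY_PACKAGE: 'datastore_v3',
     KEY_CAPABILITIES: ['write']},
    {KEY_TITLE: 'Memcache', KEY_PACKAGE: 'memcache'},
    {KEY_TITLE: 'URL Fetch', KEY_PACKAGE: 'urlfetch'},
]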
Example 3
 def wrapped(request, *args, **kwargs):
   datastore_write = CapabilitySet('datastore_v3', capabilities=['write'])
   datastore_writable = datastore_write.will_remain_enabled_for(60)
   if not datastore_writable:
     logging.warning('Datastore is not writable. %s',
                     datastore_write.admin_message())
     if not request.is_xhr:
       return redirect(url_for(endpoint))
   return view(request, *args, **kwargs)
Example 4
    def run(self, checks=DEFAULT_CAPABILITY_CHECKS):
        """
        Run the specified set of capability checks and rebuild the
        availability list.
        """
        
        # reset the availability list
        self.availability = []
        
        # iterate over the list of checks and execute them
        for check_instance in checks:
            # ensure that we have a title and a package
            if (KEY_TITLE not in check_instance
                    or KEY_PACKAGE not in check_instance):
                logging.warning("Invalid check defined: %s", check_instance)
                continue
            
            logging.debug("Running the %s check", check_instance[KEY_TITLE])
            
            # determine the capabilities we are looking for
            caps = []
            if KEY_CAPABILITIES in check_instance:
                caps = check_instance[KEY_CAPABILITIES]
                
            # create the capability set instance
            capset = CapabilitySet(check_instance[KEY_PACKAGE], caps, ['*'])
            
            # create the service availability record
            service_avail = {
                'title': check_instance[KEY_TITLE],
                
                # determine whether the service is available now
                'avail_now': capset.is_enabled(),

                # determine whether the service will still be available in one hour
                'avail_hour': capset.will_remain_enabled_for(3600),
                
                # determine whether the service will still be available in one day
                'avail_day': capset.will_remain_enabled_for(86400),
            }
            
            # log the results
            logging.debug("Completed availability check, results below\n%s", service_avail)
            
            # add the availability to the service list
            self.availability.append(service_avail)
Example 5
def incr_count(entity_key, counter_name, txn_def, incr_amt=1,
               interval=_DEF_UPDATE_INTERVAL, entity_val=0):
    '''Increment a counter.

    Buffers increments in memcache and periodically flushes them to the
    backing datastore entity via the supplied transaction function.

    Only increments are handled right now. Returns the current count and
    raises an exception on error.'''

    # Don't worry about decrements right now.
    if incr_amt < 0:
        return 0

    # Generate memcached keys.
    lock_key  = _get_key(_MACRO_LOCK,  counter_name, entity_key)
    count_key = _get_key(_MACRO_COUNT, counter_name, entity_key)
    incr_key  = _get_key(_MACRO_INCR,  counter_name, entity_key)
    if _DEBUG: logging.debug("keys: %s %s %s" % (lock_key, count_key, incr_key))

    # Check to see if memcache is up.
    look_ahead_time = 10 + interval
    memcache_ops    = CapabilitySet('memcache', methods=['add'])
    memcache_down   = not memcache_ops.will_remain_enabled_for(look_ahead_time)

    # If memcache is down or interval seconds has passed, update
    # the datastore.
    if memcache_down or memcache.add(lock_key, None, time=interval):
        # Update the datastore
        incr = int(memcache.get(incr_key) or 0) + incr_amt
        if _DEBUG: logging.debug("incr(%s): updating datastore with %d", counter_name, incr)
        memcache.set(incr_key, 0)
        try:
            stored_count = db.run_in_transaction(txn_def, entity_key, incr, counter_name)
        except:
            memcache.set(incr_key, incr)
            logging.error('Counter(%s): unable to update datastore counter.', counter_name)
            raise
        memcache.set(count_key, stored_count)
        return stored_count
    # Majority of the time, this branch is taken.
    else:
        incr = memcache.get(incr_key)
        if incr is None:
            # incr_key should already be set in memcache. If not, two possibilities:
            # 1) memcache has failed since the last datastore update.
            # 2) this branch ran before the update branch set incr_key (unlikely).
            stored_count = db.run_in_transaction(txn_def, entity_key, incr_amt, counter_name)
            memcache.set(count_key, stored_count)
            memcache.set(incr_key, 0)
            logging.error('Counter(%s): possible memcache failure in update interval.',
                          counter_name)
            return stored_count
        # Memcache increment.
        else:
            memcache.incr(incr_key, delta=incr_amt)
            if _DEBUG: logging.debug("incr(%s): incrementing memcache with %d", counter_name, incr_amt)
            return get_count(entity_key, counter_name, entity_val)
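incr_count leaves the actual datastore write to a caller-supplied txn_def(entity_key, incr, counter_name) function executed inside a transaction. A minimal sketch of such a function and of a call site is shown below; the CounterEntity model and the key used are assumptions that only illustrate the expected contract (apply the pending increment, return the new total):

from google.appengine.ext import db


class CounterEntity(db.Model):  # hypothetical backing model
    count = db.IntegerProperty(default=0)


def txn_def(entity_key, incr, counter_name):
    """Apply the buffered increment inside a transaction and return the new total."""
    entity = db.get(entity_key)
    if entity is None:
        # Assumes the key was built from a key_name so the entity can be recreated.
        entity = CounterEntity(key_name=entity_key.name(), count=0)
    entity.count += incr
    entity.put()
    return entity.count


# Example call site:
counter_key = db.Key.from_path('CounterEntity', 'page_views')
incr_count(counter_key, 'page_views', txn_def, incr_amt=1)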
Example 6
 def wrapped(request, *args, **kwargs):
   datastore_write = CapabilitySet('datastore_v3', capabilities=['write'])
   datastore_writable = datastore_write.will_remain_enabled_for(60)
   if not datastore_writable:
     logging.warning('Datastore is not writable. %s',
                     datastore_write.admin_message())
     if not request.is_xhr:
       # Saving session will also fail.
       if hasattr(request, 'session'):
         del request.session
       return redirect(url_for(endpoint))
   return view(request, *args, **kwargs)
Example 7
    def decr(self, value=-1):
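        """Decrement the counter, buffering the change in memcache and
        periodically flushing it to the backing datastore entity."""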
        #if value > 0:
        #    raise ValueError('CachedCounter cannot handle negative numbers.')
        
        def update_count(name, decr, error_possible=False):
            entity = Counter.get_by_key_name(name)
            if entity:
                entity.count += decr
                logging.debug("decr(%s): update_count on retrieved entity by %d to %d", name, decr, entity.count)
            else:
                entity = Counter(key_name=name, count=decr)
                logging.debug("decr(%s): update_count on new entity set to %d", name, decr)
            if error_possible:
                entity.error_possible = True
            entity.put()
            return entity.count

        look_ahead_time = 10 + self._update_interval
        memcache_ops = CapabilitySet('memcache', methods=['add'])
        memcache_down = not memcache_ops.will_remain_enabled_for(look_ahead_time)
        if memcache_down or memcache.add(self._lock_key, None, time=self._update_interval):
            # Update the datastore
            decr = int(memcache.get(self._decr_key) or 0) + value
            logging.debug("decr(%s): updating datastore with %d", self._name, decr)
            memcache.set(self._decr_key, 0)
            try:
                stored_count = db.run_in_transaction(update_count, self._name, decr)
            except:
                memcache.set(self._decr_key, decr)
                logging.error('Counter(%s): unable to update datastore counter.', self._name)
                raise
            memcache.set(self._count_key, stored_count)
            return stored_count
        else:
            decr = memcache.get(self._decr_key)
            if decr is None:
                # _decr_key should already be set in memcache. If not, two possibilities:
                # 1) memcache has failed since the last datastore update.
                # 2) this branch ran before the update branch set _decr_key (unlikely).
                stored_count = db.run_in_transaction(
                    update_count, self._name, value, error_possible=True)
                memcache.set(self._count_key, stored_count)
                memcache.set(self._decr_key, 0)
                logging.error('Counter(%s): possible memcache failure in update interval.',
                              self._name)
                return stored_count
            else:
                memcache.decr(self._decr_key, delta=value)
                logging.debug("decr(%s): decrementing memcache with %d", self._name, value)
                return self.count
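decr relies on a Counter model and on per-instance memcache keys that are set up elsewhere in the class. A hedged sketch of that surrounding state, inferred from how the attributes are used above, is given below; the key formats and the constructor are illustrative, not the original code:

from google.appengine.ext import db


class Counter(db.Model):
    """Backing entity assumed by update_count(); count holds the persisted total."""
    count = db.IntegerProperty(default=0)
    error_possible = db.BooleanProperty(default=False)


class CachedCounter(object):
    def __init__(self, name, update_interval=10):
        self._name = name
        self._update_interval = update_interval
        # Memcache keys used by decr(): a flush lock, the buffered delta and
        # a cache of the last persisted total.
        self._lock_key = 'counter:lock:%s' % name
        self._decr_key = 'counter:decr:%s' % name
        self._count_key = 'counter:count:%s' % name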